/* npe.c - Intel IXP4xx NPE Ethernet driver for U-Boot */
  1. /*
  2. * (C) Copyright 2005-2006
  3. * Stefan Roese, DENX Software Engineering, sr@denx.de.
  4. *
  5. * See file CREDITS for list of people who contributed to this
  6. * project.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License as
  10. * published by the Free Software Foundation; either version 2 of
  11. * the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  21. * MA 02111-1307 USA
  22. */
  23. #if 0
  24. #define DEBUG /* define for debug output */
  25. #endif
  26. #include <config.h>
  27. #include <common.h>
  28. #include <net.h>
  29. #include <miiphy.h>
  30. #include <malloc.h>
  31. #include <asm/processor.h>
  32. #include <asm/arch-ixp/ixp425.h>
  33. #include <IxOsal.h>
  34. #include <IxEthAcc.h>
  35. #include <IxEthDB.h>
  36. #include <IxNpeDl.h>
  37. #include <IxQMgr.h>
  38. #include <IxNpeMh.h>
  39. #include <ix_ossl.h>
  40. #include <IxFeatureCtrl.h>
  41. #include <npe.h>
/* QMgr dispatcher entry point, filled in by ixQMgrDispatcherLoopGet() */
static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;

/* Per-port flags: coprocessor present on this silicon / port in use */
static int npe_exists[NPE_NUM_PORTS];
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CONFIG_SYS_CACHELINE_SIZE - 1];
static u8 *npe_alloc_end;	/* first byte past the usable pool */
static u8 *npe_alloc_free;	/* next free (cacheline-aligned) byte */
  49. static void *npe_alloc(int size)
  50. {
  51. static int count = 0;
  52. void *p = NULL;
  53. size = (size + (CONFIG_SYS_CACHELINE_SIZE-1)) & ~(CONFIG_SYS_CACHELINE_SIZE-1);
  54. count++;
  55. if ((npe_alloc_free + size) < npe_alloc_end) {
  56. p = npe_alloc_free;
  57. npe_alloc_free += size;
  58. } else {
  59. printf("npe_alloc: failed (count=%d, size=%d)!\n", count, size);
  60. }
  61. return p;
  62. }
  63. /* Not interrupt safe! */
  64. static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
  65. {
  66. IX_OSAL_MBUF *m = *q;
  67. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;
  68. if (m) {
  69. while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
  70. m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
  71. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
  72. } else
  73. *q = new;
  74. }
  75. /* Not interrupt safe! */
  76. static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
  77. {
  78. IX_OSAL_MBUF *m = *q;
  79. if (m)
  80. *q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
  81. return m;
  82. }
  83. static void reset_tx_mbufs(struct npe* p_npe)
  84. {
  85. IX_OSAL_MBUF *m;
  86. int i;
  87. p_npe->txQHead = NULL;
  88. for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
  89. m = &p_npe->tx_mbufs[i];
  90. memset(m, 0, sizeof(*m));
  91. IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
  92. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  93. mbuf_enqueue(&p_npe->txQHead, m);
  94. }
  95. }
  96. static void reset_rx_mbufs(struct npe* p_npe)
  97. {
  98. IX_OSAL_MBUF *m;
  99. int i;
  100. p_npe->rxQHead = NULL;
  101. HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
  102. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  103. for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
  104. m = &p_npe->rx_mbufs[i];
  105. memset(m, 0, sizeof(*m));
  106. IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
  107. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  108. if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
  109. printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id);
  110. break;
  111. }
  112. }
  113. }
  114. static void init_rx_mbufs(struct npe* p_npe)
  115. {
  116. p_npe->rxQHead = NULL;
  117. p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
  118. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  119. if (p_npe->rx_pkts == NULL) {
  120. printf("alloc of packets failed.\n");
  121. return;
  122. }
  123. p_npe->rx_mbufs = (IX_OSAL_MBUF *)
  124. npe_alloc(sizeof(IX_OSAL_MBUF) *
  125. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  126. if (p_npe->rx_mbufs == NULL) {
  127. printf("alloc of mbufs failed.\n");
  128. return;
  129. }
  130. reset_rx_mbufs(p_npe);
  131. }
  132. static void init_tx_mbufs(struct npe* p_npe)
  133. {
  134. p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
  135. CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
  136. if (p_npe->tx_pkts == NULL) {
  137. printf("alloc of packets failed.\n");
  138. return;
  139. }
  140. p_npe->tx_mbufs = (IX_OSAL_MBUF *)
  141. npe_alloc(sizeof(IX_OSAL_MBUF) *
  142. CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
  143. if (p_npe->tx_mbufs == NULL) {
  144. printf("alloc of mbufs failed.\n");
  145. return;
  146. }
  147. reset_tx_mbufs(p_npe);
  148. }
  149. /* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
  150. static int __eth_to_npe(int eth_id)
  151. {
  152. switch(eth_id) {
  153. case IX_ETH_PORT_1:
  154. return IX_NPEMH_NPEID_NPEB;
  155. case IX_ETH_PORT_2:
  156. return IX_NPEMH_NPEID_NPEC;
  157. case IX_ETH_PORT_3:
  158. return IX_NPEMH_NPEID_NPEA;
  159. }
  160. return 0;
  161. }
  162. /* Poll the CSR machinery. */
  163. static void npe_poll(int eth_id)
  164. {
  165. if (qDispatcherFunc != NULL) {
  166. ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
  167. (*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
  168. }
  169. }
/*
 * ethAcc RX callback.
 *
 * Invoked by the access layer for each received frame.  Non-empty
 * frames are copied into the U-Boot receive ring (NetRxPackets) for
 * npe_rx() to deliver later; the mbuf is then recycled back to the
 * NPE via ixEthAccPortRxFreeReplenish().
 */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		/* park the mbuf on our queue while we copy the data out */
		mbuf_enqueue(&p_npe->rxQHead, m);

		/* ring full: write index would catch up with read index */
		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			/* copy frame and length into the receive ring */
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}

		/* take the oldest queued mbuf (not necessarily 'm') to recycle */
		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		/*
		 * NOTE(review): nothing was enqueued in this branch, so this
		 * dequeues whatever is at the head of rxQHead — presumably
		 * relies on the queue being non-empty here; verify.
		 */
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}
  212. /* ethAcc TX callback */
  213. static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
  214. {
  215. struct npe* p_npe = (struct npe *)cbTag;
  216. debug("%s\n", __FUNCTION__);
  217. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  218. IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
  219. IX_OSAL_MBUF_FLAGS(m) = 0;
  220. mbuf_enqueue(&p_npe->txQHead, m);
  221. }
  222. static int npe_set_mac_address(struct eth_device *dev)
  223. {
  224. struct npe *p_npe = (struct npe *)dev->priv;
  225. IxEthAccMacAddr npeMac;
  226. debug("%s\n", __FUNCTION__);
  227. /* Set MAC address */
  228. memcpy(npeMac.macAddress, dev->enetaddr, 6);
  229. if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
  230. printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
  231. npeMac.macAddress[0], npeMac.macAddress[1],
  232. npeMac.macAddress[2], npeMac.macAddress[3],
  233. npeMac.macAddress[4], npeMac.macAddress[5]);
  234. return 0;
  235. }
  236. return 1;
  237. }
/*
 * Boot-time CSR library initialization.
 *
 * Brings up the queue manager and the NPE message handler, downloads
 * the Ethernet firmware image into NPE-B/NPE-C (only for ports that
 * exist and are used), then initializes the ethAcc driver and each
 * port.  The initialization order is mandated by the CSR library.
 *
 * Returns 1 on success, 0 on any fatal error.
 */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	/* remember the dispatcher so npe_poll() can drive the queues */
	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, FALSE);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	/* per-port setup; failures here are reported but not fatal */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;
		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}

	return 1;
}
/*
 * U-Boot eth init hook.
 *
 * Determines link speed/duplex (from fixed-link config options or by
 * waiting for PHY autonegotiation), resets the bump allocator, sets up
 * the RX/TX mbuf pools, registers the RX and TX-done callbacks,
 * programs the MAC address and enables the port.
 *
 * Returns 0 on success, -1 on error.
 */
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

#ifdef CONFIG_MII_NPE0_FIXEDLINK
	if (0 == p_npe->eth_id) {
		speed = CONFIG_MII_NPE0_SPEED;
		duplex = CONFIG_MII_NPE0_FULLDUPLEX ? FULL : HALF;
	} else
#endif
#ifdef CONFIG_MII_NPE1_FIXEDLINK
	if (1 == p_npe->eth_id) {
		speed = CONFIG_MII_NPE1_SPEED;
		duplex = CONFIG_MII_NPE1_FULLDUPLEX ? FULL : HALF;
	} else
#endif
	{
		miiphy_read(dev->name, p_npe->phy_no, MII_BMSR, &reg_short);

		/*
		 * Wait if PHY is capable of autonegotiation and
		 * autonegotiation is not complete
		 */
		if ((reg_short & BMSR_ANEGCAPABLE) &&
		    !(reg_short & BMSR_ANEGCOMPLETE)) {
			puts("Waiting for PHY auto negotiation to complete");
			i = 0;
			while (!(reg_short & BMSR_ANEGCOMPLETE)) {
				/*
				 * Timeout reached ?
				 */
				if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
					puts(" TIMEOUT !\n");
					break;
				}

				/* re-read status every 1000 iterations, print progress */
				if ((i++ % 1000) == 0) {
					putc('.');
					miiphy_read(dev->name, p_npe->phy_no,
						    MII_BMSR, &reg_short);
				}
				udelay(1000);	/* 1 ms */
			}
			puts(" done\n");
			/* another 500 ms (results in faster booting) */
			udelay(500000);
		}
		speed = miiphy_speed(dev->name, p_npe->phy_no);
		duplex = miiphy_duplex(dev->name, p_npe->phy_no);
	}

	/* report the link parameters once per device */
	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	/* reset the bump allocator; mbufs and buffers are re-carved below */
	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CONFIG_SYS_CACHELINE_SIZE - 1) & ~(CONFIG_SYS_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return -1;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return -1;
	}

	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return -1;
	}

	p_npe->active = 1;

	return 0;
}
#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library (reverse order of npe_csr_load()). */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif
/*
 * U-Boot eth halt hook.
 *
 * Polls the CSR machinery for a while so in-flight mbufs can be
 * recovered, then marks the port inactive.  The full port-disable /
 * NPE stop-and-reset sequence is currently compiled out (#if 0).
 */
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0 /* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif
#endif
	p_npe->active = 0;
}
  429. static int npe_send(struct eth_device *dev, volatile void *packet, int len)
  430. {
  431. struct npe *p_npe = (struct npe *)dev->priv;
  432. u8 *dest;
  433. int err;
  434. IX_OSAL_MBUF *m;
  435. debug("%s\n", __FUNCTION__);
  436. m = mbuf_dequeue(&p_npe->txQHead);
  437. dest = IX_OSAL_MBUF_MDATA(m);
  438. IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
  439. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;
  440. memcpy(dest, (char *)packet, len);
  441. if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
  442. != IX_ETH_ACC_SUCCESS) {
  443. printf("npe_send: Can't submit frame. err[%d]\n", err);
  444. mbuf_enqueue(&p_npe->txQHead, m);
  445. return 0;
  446. }
  447. #ifdef DEBUG_PRINT_TX_FRAMES
  448. {
  449. u8 *ptr = IX_OSAL_MBUF_MDATA(m);
  450. int i;
  451. for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
  452. printf("%02x ", *ptr++);
  453. }
  454. printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
  455. }
  456. #endif
  457. npe_poll(p_npe->eth_id);
  458. return len;
  459. }
  460. static int npe_rx(struct eth_device *dev)
  461. {
  462. struct npe *p_npe = (struct npe *)dev->priv;
  463. debug("%s\n", __FUNCTION__);
  464. npe_poll(p_npe->eth_id);
  465. debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
  466. while (p_npe->rx_write != p_npe->rx_read) {
  467. debug("Reading message #%d\n", p_npe->rx_read);
  468. NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
  469. p_npe->rx_read++;
  470. if (p_npe->rx_read == PKTBUFSRX)
  471. p_npe->rx_read = 0;
  472. }
  473. return 0;
  474. }
/*
 * Probe and register all NPE Ethernet ports with U-Boot.
 *
 * For each configured port that has a MAC address in the environment,
 * allocates the eth_device and driver-private state and registers the
 * device.  On the first invocation only, detects which Ethernet
 * coprocessors the silicon provides (IXP42X stepping dependent, or
 * IXP46X) and performs the one-time CSR/firmware bring-up via
 * npe_csr_load().
 *
 * Returns 1 on success, 0 if the CSR load failed, -1 on allocation
 * failure.
 */
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;
	uchar enetaddr[6];

	for (eth_num = 0; eth_num < CONFIG_SYS_NPE_NUMS; eth_num++) {
		/* See if we can actually bring up the interface, otherwise, skip it */
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1) {
			if (!eth_getenv_enetaddr("eth1addr", enetaddr))
				continue;
		} else
#endif
			if (!eth_getenv_enetaddr("ethaddr", enetaddr))
				continue;

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		p_npe->eth_id = eth_num;
		memcpy(dev->enetaddr, enetaddr, 6);
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1)
			p_npe->phy_no = CONFIG_PHY1_ADDR;
		else
#endif
			p_npe->phy_no = CONFIG_PHY_ADDR;

		/* wire up the U-Boot network driver callbacks */
		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;
		p_npe->print_speed = 1;

		/* one-time global hardware detection and CSR bring-up */
		if (0 == virgin) {
			virgin = 1;

			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
				default: /* newer than B0 */
					/*
					 * If it is B0 or newer Silicon, we
					 * only enable port when its
					 * corresponding Eth Coprocessor is
					 * available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = TRUE;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = TRUE;
					npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = TRUE;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = TRUE;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			/* set up the bump allocator over the static pool */
			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CONFIG_SYS_CACHELINE_SIZE - 1)
						& ~(CONFIG_SYS_CACHELINE_SIZE - 1));

			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif
	} /* end for each supported device */

	return 1;
}