npe.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676
  1. /*
  2. * (C) Copyright 2005-2006
  3. * Stefan Roese, DENX Software Engineering, sr@denx.de.
  4. *
  5. * See file CREDITS for list of people who contributed to this
  6. * project.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License as
  10. * published by the Free Software Foundation; either version 2 of
  11. * the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  21. * MA 02111-1307 USA
  22. */
  23. #if 0
  24. #define DEBUG /* define for debug output */
  25. #endif
  26. #include <config.h>
  27. #include <common.h>
  28. #include <net.h>
  29. #include <miiphy.h>
  30. #include <malloc.h>
  31. #include <asm/processor.h>
  32. #include <asm/arch-ixp/ixp425.h>
  33. #include <IxOsal.h>
  34. #include <IxEthAcc.h>
  35. #include <IxEthDB.h>
  36. #include <IxNpeDl.h>
  37. #include <IxQMgr.h>
  38. #include <IxNpeMh.h>
  39. #include <ix_ossl.h>
  40. #include <IxFeatureCtrl.h>
  41. #include <npe.h>
/* QMgr dispatcher entry point; set by npe_csr_load(), checked by npe_poll(). */
static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;

/* Per-port flags: coprocessor present on this silicon / port used by U-Boot. */
static int npe_exists[NPE_NUM_PORTS];
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CONFIG_SYS_CACHELINE_SIZE - 1];
static u8 *npe_alloc_end;	/* one past the last usable pool byte */
static u8 *npe_alloc_free;	/* next free (cacheline-aligned) pool byte */
  49. static void *npe_alloc(int size)
  50. {
  51. static int count = 0;
  52. void *p = NULL;
  53. size = (size + (CONFIG_SYS_CACHELINE_SIZE-1)) & ~(CONFIG_SYS_CACHELINE_SIZE-1);
  54. count++;
  55. if ((npe_alloc_free + size) < npe_alloc_end) {
  56. p = npe_alloc_free;
  57. npe_alloc_free += size;
  58. } else {
  59. printf("npe_alloc: failed (count=%d, size=%d)!\n", count, size);
  60. }
  61. return p;
  62. }
  63. /* Not interrupt safe! */
  64. static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
  65. {
  66. IX_OSAL_MBUF *m = *q;
  67. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;
  68. if (m) {
  69. while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
  70. m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
  71. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
  72. } else
  73. *q = new;
  74. }
  75. /* Not interrupt safe! */
  76. static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
  77. {
  78. IX_OSAL_MBUF *m = *q;
  79. if (m)
  80. *q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
  81. return m;
  82. }
  83. static void reset_tx_mbufs(struct npe* p_npe)
  84. {
  85. IX_OSAL_MBUF *m;
  86. int i;
  87. p_npe->txQHead = NULL;
  88. for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
  89. m = &p_npe->tx_mbufs[i];
  90. memset(m, 0, sizeof(*m));
  91. IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
  92. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  93. mbuf_enqueue(&p_npe->txQHead, m);
  94. }
  95. }
  96. static void reset_rx_mbufs(struct npe* p_npe)
  97. {
  98. IX_OSAL_MBUF *m;
  99. int i;
  100. p_npe->rxQHead = NULL;
  101. HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
  102. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  103. for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
  104. m = &p_npe->rx_mbufs[i];
  105. memset(m, 0, sizeof(*m));
  106. IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
  107. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  108. if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
  109. printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id);
  110. break;
  111. }
  112. }
  113. }
  114. static void init_rx_mbufs(struct npe* p_npe)
  115. {
  116. p_npe->rxQHead = NULL;
  117. p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
  118. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  119. if (p_npe->rx_pkts == NULL) {
  120. printf("alloc of packets failed.\n");
  121. return;
  122. }
  123. p_npe->rx_mbufs = (IX_OSAL_MBUF *)
  124. npe_alloc(sizeof(IX_OSAL_MBUF) *
  125. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  126. if (p_npe->rx_mbufs == NULL) {
  127. printf("alloc of mbufs failed.\n");
  128. return;
  129. }
  130. reset_rx_mbufs(p_npe);
  131. }
  132. static void init_tx_mbufs(struct npe* p_npe)
  133. {
  134. p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
  135. CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
  136. if (p_npe->tx_pkts == NULL) {
  137. printf("alloc of packets failed.\n");
  138. return;
  139. }
  140. p_npe->tx_mbufs = (IX_OSAL_MBUF *)
  141. npe_alloc(sizeof(IX_OSAL_MBUF) *
  142. CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
  143. if (p_npe->tx_mbufs == NULL) {
  144. printf("alloc of mbufs failed.\n");
  145. return;
  146. }
  147. reset_tx_mbufs(p_npe);
  148. }
  149. /* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
  150. static int __eth_to_npe(int eth_id)
  151. {
  152. switch(eth_id) {
  153. case IX_ETH_PORT_1:
  154. return IX_NPEMH_NPEID_NPEB;
  155. case IX_ETH_PORT_2:
  156. return IX_NPEMH_NPEID_NPEC;
  157. case IX_ETH_PORT_3:
  158. return IX_NPEMH_NPEID_NPEA;
  159. }
  160. return 0;
  161. }
  162. /* Poll the CSR machinery. */
  163. static void npe_poll(int eth_id)
  164. {
  165. if (qDispatcherFunc != NULL) {
  166. ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
  167. (*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
  168. }
  169. }
/*
 * ethAcc RX callback: copy a received frame into U-Boot's NetRxPackets
 * ring (consumed later by npe_rx()) and return an mbuf to the NPE.
 *
 * cbTag is the struct npe * registered in npe_init(). Note the mbuf is
 * first enqueued on rxQHead and then the queue HEAD (possibly a different
 * mbuf) is dequeued and replenished -- preserve this order.
 */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		mbuf_enqueue(&p_npe->rxQHead, m);

		/* ring full: writer would overrun the reader; drop the frame */
		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			/* copy payload out so the mbuf can go straight back to the NPE */
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			/* debug aid: hex-dump the first 60 bytes of the frame */
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}

		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}
  212. /* ethAcc TX callback */
  213. static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
  214. {
  215. struct npe* p_npe = (struct npe *)cbTag;
  216. debug("%s\n", __FUNCTION__);
  217. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  218. IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
  219. IX_OSAL_MBUF_FLAGS(m) = 0;
  220. mbuf_enqueue(&p_npe->txQHead, m);
  221. }
  222. static int npe_set_mac_address(struct eth_device *dev)
  223. {
  224. struct npe *p_npe = (struct npe *)dev->priv;
  225. IxEthAccMacAddr npeMac;
  226. debug("%s\n", __FUNCTION__);
  227. /* Set MAC address */
  228. memcpy(npeMac.macAddress, dev->enetaddr, 6);
  229. if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
  230. printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
  231. npeMac.macAddress[0], npeMac.macAddress[1],
  232. npeMac.macAddress[2], npeMac.macAddress[3],
  233. npeMac.macAddress[4], npeMac.macAddress[5]);
  234. return 0;
  235. }
  236. return 1;
  237. }
/*
 * Boot-time CSR library initialization.
 *
 * Brings up the IXP4xx access layer in the required order: queue manager,
 * NPE message handler, NPE firmware download (NPE-B / NPE-C), Ethernet
 * access driver, then per-port configuration. Do not reorder these steps.
 *
 * Returns 1 on success, 0 on the first fatal failure. Per-port setup
 * errors in the final loop are reported but not fatal.
 */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	/* Cache the dispatcher entry point; npe_poll() is a no-op until this is set. */
	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	/* Download firmware only to NPEs that exist on this silicon and are used. */
	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, FALSE);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	/* Per-port configuration: FIFO scheduling, RX FCS strip, TX FCS append. */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;

		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}

	return 1;
}
/*
 * U-Boot per-device init hook.
 *
 * Waits for PHY autonegotiation (with timeout), reports link speed once,
 * resets the mbuf pool, registers RX/TX callbacks, programs the MAC
 * address and enables the port.
 *
 * Returns 0 on success, -1 when callback registration or port enable fails.
 */
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

	miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);

	/*
	 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE) && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached ?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}

			/* re-read the status register roughly once per second */
			if ((i++ % 1000) == 0) {
				putc ('.');
				miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);
			}
			udelay (1000);	/* 1 ms */
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}

	speed = miiphy_speed (dev->name, p_npe->phy_no);
	duplex = miiphy_duplex (dev->name, p_npe->phy_no);

	/* Print link status only on the first init after registration. */
	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	/* Reset the static pool to a cacheline-aligned start before rebuilding mbufs. */
	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CONFIG_SYS_CACHELINE_SIZE - 1) & ~(CONFIG_SYS_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return -1;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return -1;
	}

	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return -1;
	}

	p_npe->active = 1;

	return 0;
}
/*
 * NOTE(review): the helpers below are compiled out. They implement the
 * clean-shutdown path (CSR unload + buffer-recovery callbacks) that would
 * presumably be needed before booting an OS -- confirm before enabling.
 */
#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library. */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif
/*
 * U-Boot halt hook: poll the CSR machinery for a while so in-flight mbufs
 * are recovered, then mark the port inactive. The full disable/NPE-reset
 * sequence is compiled out below -- presumably only needed for a clean
 * hand-off to an OS; TODO confirm before enabling it.
 */
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
	/* Swap in no-op callbacks so ethAcc can hand buffers back during disable. */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0 /* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif
#endif
	p_npe->active = 0;
}
  411. static int npe_send(struct eth_device *dev, volatile void *packet, int len)
  412. {
  413. struct npe *p_npe = (struct npe *)dev->priv;
  414. u8 *dest;
  415. int err;
  416. IX_OSAL_MBUF *m;
  417. debug("%s\n", __FUNCTION__);
  418. m = mbuf_dequeue(&p_npe->txQHead);
  419. dest = IX_OSAL_MBUF_MDATA(m);
  420. IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
  421. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;
  422. memcpy(dest, (char *)packet, len);
  423. if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
  424. != IX_ETH_ACC_SUCCESS) {
  425. printf("npe_send: Can't submit frame. err[%d]\n", err);
  426. mbuf_enqueue(&p_npe->txQHead, m);
  427. return 0;
  428. }
  429. #ifdef DEBUG_PRINT_TX_FRAMES
  430. {
  431. u8 *ptr = IX_OSAL_MBUF_MDATA(m);
  432. int i;
  433. for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
  434. printf("%02x ", *ptr++);
  435. }
  436. printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
  437. }
  438. #endif
  439. npe_poll(p_npe->eth_id);
  440. return len;
  441. }
  442. static int npe_rx(struct eth_device *dev)
  443. {
  444. struct npe *p_npe = (struct npe *)dev->priv;
  445. debug("%s\n", __FUNCTION__);
  446. npe_poll(p_npe->eth_id);
  447. debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
  448. while (p_npe->rx_write != p_npe->rx_read) {
  449. debug("Reading message #%d\n", p_npe->rx_read);
  450. NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
  451. p_npe->rx_read++;
  452. if (p_npe->rx_read == PKTBUFSRX)
  453. p_npe->rx_read = 0;
  454. }
  455. return 0;
  456. }
/*
 * Probe and register all NPE Ethernet devices.
 *
 * For each port that has a MAC address in the environment, allocates an
 * eth_device plus private state and registers it with the net core.
 * One-time work (silicon feature detection, pool setup, CSR load) runs
 * only on the first call, guarded by `virgin`.
 *
 * Returns 1 on success, -1 on allocation failure, 0 when the CSR load fails.
 */
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;
	uchar enetaddr[6];

	for (eth_num = 0; eth_num < CONFIG_SYS_NPE_NUMS; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1) {
			if (!eth_getenv_enetaddr("eth1addr", enetaddr))
				continue;
		} else
#endif
			if (!eth_getenv_enetaddr("ethaddr", enetaddr))
				continue;

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		p_npe->eth_id = eth_num;
		memcpy(dev->enetaddr, enetaddr, 6);
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1)
			p_npe->phy_no = CONFIG_PHY1_ADDR;
		else
#endif
			p_npe->phy_no = CONFIG_PHY_ADDR;

		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;

		/* makes npe_init() report the link speed once */
		p_npe->print_speed = 1;

		if (0 == virgin) {
			virgin = 1;

			/* Detect which Ethernet coprocessors exist on this silicon. */
			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
					/*
					 * If it is B0 Silicon, we only enable port when its corresponding
					 * Eth Coprocessor is available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = TRUE;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = TRUE;
					npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = TRUE;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = TRUE;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			/* Cacheline-align the start of the static allocation pool. */
			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CONFIG_SYS_CACHELINE_SIZE - 1)
						& ~(CONFIG_SYS_CACHELINE_SIZE - 1));

			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif
	}			/* end for each supported device */

	return 1;
}