/* mvgbe.c - Marvell Gigabit Ethernet (mvgbe) driver */
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */
  30. #include <common.h>
  31. #include <net.h>
  32. #include <malloc.h>
  33. #include <miiphy.h>
  34. #include <asm/io.h>
  35. #include <asm/errno.h>
  36. #include <asm/types.h>
  37. #include <asm/system.h>
  38. #include <asm/byteorder.h>
  39. #if defined(CONFIG_KIRKWOOD)
  40. #include <asm/arch/kirkwood.h>
  41. #elif defined(CONFIG_ORION5X)
  42. #include <asm/arch/orion5x.h>
  43. #endif
  44. #include "mvgbe.h"
  45. DECLARE_GLOBAL_DATA_PTR;
  46. #define MV_PHY_ADR_REQUEST 0xee
  47. #define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
  48. /*
  49. * smi_reg_read - miiphy_read callback function.
  50. *
  51. * Returns 16bit phy register value, or 0xffff on error
  52. */
  53. static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
  54. {
  55. struct eth_device *dev = eth_get_dev_by_name(devname);
  56. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  57. struct mvgbe_registers *regs = dmvgbe->regs;
  58. u32 smi_reg;
  59. u32 timeout;
  60. /* Phyadr read request */
  61. if (phy_adr == MV_PHY_ADR_REQUEST &&
  62. reg_ofs == MV_PHY_ADR_REQUEST) {
  63. /* */
  64. *data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
  65. return 0;
  66. }
  67. /* check parameters */
  68. if (phy_adr > PHYADR_MASK) {
  69. printf("Err..(%s) Invalid PHY address %d\n",
  70. __FUNCTION__, phy_adr);
  71. return -EFAULT;
  72. }
  73. if (reg_ofs > PHYREG_MASK) {
  74. printf("Err..(%s) Invalid register offset %d\n",
  75. __FUNCTION__, reg_ofs);
  76. return -EFAULT;
  77. }
  78. timeout = MVGBE_PHY_SMI_TIMEOUT;
  79. /* wait till the SMI is not busy */
  80. do {
  81. /* read smi register */
  82. smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
  83. if (timeout-- == 0) {
  84. printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
  85. return -EFAULT;
  86. }
  87. } while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
  88. /* fill the phy address and regiser offset and read opcode */
  89. smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
  90. | (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
  91. | MVGBE_PHY_SMI_OPCODE_READ;
  92. /* write the smi register */
  93. MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
  94. /*wait till read value is ready */
  95. timeout = MVGBE_PHY_SMI_TIMEOUT;
  96. do {
  97. /* read smi register */
  98. smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
  99. if (timeout-- == 0) {
  100. printf("Err..(%s) SMI read ready timeout\n",
  101. __FUNCTION__);
  102. return -EFAULT;
  103. }
  104. } while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));
  105. /* Wait for the data to update in the SMI register */
  106. for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
  107. ;
  108. *data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);
  109. debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
  110. reg_ofs, *data);
  111. return 0;
  112. }
  113. /*
  114. * smi_reg_write - imiiphy_write callback function.
  115. *
  116. * Returns 0 if write succeed, -EINVAL on bad parameters
  117. * -ETIME on timeout
  118. */
  119. static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
  120. {
  121. struct eth_device *dev = eth_get_dev_by_name(devname);
  122. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  123. struct mvgbe_registers *regs = dmvgbe->regs;
  124. u32 smi_reg;
  125. u32 timeout;
  126. /* Phyadr write request*/
  127. if (phy_adr == MV_PHY_ADR_REQUEST &&
  128. reg_ofs == MV_PHY_ADR_REQUEST) {
  129. MVGBE_REG_WR(regs->phyadr, data);
  130. return 0;
  131. }
  132. /* check parameters */
  133. if (phy_adr > PHYADR_MASK) {
  134. printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
  135. return -EINVAL;
  136. }
  137. if (reg_ofs > PHYREG_MASK) {
  138. printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
  139. return -EINVAL;
  140. }
  141. /* wait till the SMI is not busy */
  142. timeout = MVGBE_PHY_SMI_TIMEOUT;
  143. do {
  144. /* read smi register */
  145. smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
  146. if (timeout-- == 0) {
  147. printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
  148. return -ETIME;
  149. }
  150. } while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
  151. /* fill the phy addr and reg offset and write opcode and data */
  152. smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
  153. smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
  154. | (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
  155. smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;
  156. /* write the smi register */
  157. MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
  158. return 0;
  159. }
  160. /* Stop and checks all queues */
  161. static void stop_queue(u32 * qreg)
  162. {
  163. u32 reg_data;
  164. reg_data = readl(qreg);
  165. if (reg_data & 0xFF) {
  166. /* Issue stop command for active channels only */
  167. writel((reg_data << 8), qreg);
  168. /* Wait for all queue activity to terminate. */
  169. do {
  170. /*
  171. * Check port cause register that all queues
  172. * are stopped
  173. */
  174. reg_data = readl(qreg);
  175. }
  176. while (reg_data & 0xFF);
  177. }
  178. }
  179. /*
  180. * set_access_control - Config address decode parameters for Ethernet unit
  181. *
  182. * This function configures the address decode parameters for the Gigabit
  183. * Ethernet Controller according the given parameters struct.
  184. *
  185. * @regs Register struct pointer.
  186. * @param Address decode parameter struct.
  187. */
  188. static void set_access_control(struct mvgbe_registers *regs,
  189. struct mvgbe_winparam *param)
  190. {
  191. u32 access_prot_reg;
  192. /* Set access control register */
  193. access_prot_reg = MVGBE_REG_RD(regs->epap);
  194. /* clear window permission */
  195. access_prot_reg &= (~(3 << (param->win * 2)));
  196. access_prot_reg |= (param->access_ctrl << (param->win * 2));
  197. MVGBE_REG_WR(regs->epap, access_prot_reg);
  198. /* Set window Size reg (SR) */
  199. MVGBE_REG_WR(regs->barsz[param->win].size,
  200. (((param->size / 0x10000) - 1) << 16));
  201. /* Set window Base address reg (BA) */
  202. MVGBE_REG_WR(regs->barsz[param->win].bar,
  203. (param->target | param->attrib | param->base_addr));
  204. /* High address remap reg (HARR) */
  205. if (param->win < 4)
  206. MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);
  207. /* Base address enable reg (BARER) */
  208. if (param->enable == 1)
  209. MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
  210. else
  211. MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
  212. }
  213. static void set_dram_access(struct mvgbe_registers *regs)
  214. {
  215. struct mvgbe_winparam win_param;
  216. int i;
  217. for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
  218. /* Set access parameters for DRAM bank i */
  219. win_param.win = i; /* Use Ethernet window i */
  220. /* Window target - DDR */
  221. win_param.target = MVGBE_TARGET_DRAM;
  222. /* Enable full access */
  223. win_param.access_ctrl = EWIN_ACCESS_FULL;
  224. win_param.high_addr = 0;
  225. /* Get bank base and size */
  226. win_param.base_addr = gd->bd->bi_dram[i].start;
  227. win_param.size = gd->bd->bi_dram[i].size;
  228. if (win_param.size == 0)
  229. win_param.enable = 0;
  230. else
  231. win_param.enable = 1; /* Enable the access */
  232. /* Enable DRAM bank */
  233. switch (i) {
  234. case 0:
  235. win_param.attrib = EBAR_DRAM_CS0;
  236. break;
  237. case 1:
  238. win_param.attrib = EBAR_DRAM_CS1;
  239. break;
  240. case 2:
  241. win_param.attrib = EBAR_DRAM_CS2;
  242. break;
  243. case 3:
  244. win_param.attrib = EBAR_DRAM_CS3;
  245. break;
  246. default:
  247. /* invalid bank, disable access */
  248. win_param.enable = 0;
  249. win_param.attrib = 0;
  250. break;
  251. }
  252. /* Set the access control for address window(EPAPR) RD/WR */
  253. set_access_control(regs, &win_param);
  254. }
  255. }
  256. /*
  257. * port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
  258. *
  259. * Go through all the DA filter tables (Unicast, Special Multicast & Other
  260. * Multicast) and set each entry to 0.
  261. */
  262. static void port_init_mac_tables(struct mvgbe_registers *regs)
  263. {
  264. int table_index;
  265. /* Clear DA filter unicast table (Ex_dFUT) */
  266. for (table_index = 0; table_index < 4; ++table_index)
  267. MVGBE_REG_WR(regs->dfut[table_index], 0);
  268. for (table_index = 0; table_index < 64; ++table_index) {
  269. /* Clear DA filter special multicast table (Ex_dFSMT) */
  270. MVGBE_REG_WR(regs->dfsmt[table_index], 0);
  271. /* Clear DA filter other multicast table (Ex_dFOMT) */
  272. MVGBE_REG_WR(regs->dfomt[table_index], 0);
  273. }
  274. }
  275. /*
  276. * port_uc_addr - This function Set the port unicast address table
  277. *
  278. * This function locates the proper entry in the Unicast table for the
  279. * specified MAC nibble and sets its properties according to function
  280. * parameters.
  281. * This function add/removes MAC addresses from the port unicast address
  282. * table.
  283. *
  284. * @uc_nibble Unicast MAC Address last nibble.
  285. * @option 0 = Add, 1 = remove address.
  286. *
  287. * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
  288. */
  289. static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
  290. int option)
  291. {
  292. u32 unicast_reg;
  293. u32 tbl_offset;
  294. u32 reg_offset;
  295. /* Locate the Unicast table entry */
  296. uc_nibble = (0xf & uc_nibble);
  297. /* Register offset from unicast table base */
  298. tbl_offset = (uc_nibble / 4);
  299. /* Entry offset within the above register */
  300. reg_offset = uc_nibble % 4;
  301. switch (option) {
  302. case REJECT_MAC_ADDR:
  303. /*
  304. * Clear accepts frame bit at specified unicast
  305. * DA table entry
  306. */
  307. unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
  308. unicast_reg &= (0xFF << (8 * reg_offset));
  309. MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
  310. break;
  311. case ACCEPT_MAC_ADDR:
  312. /* Set accepts frame bit at unicast DA filter table entry */
  313. unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
  314. unicast_reg &= (0xFF << (8 * reg_offset));
  315. unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
  316. MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
  317. break;
  318. default:
  319. return 0;
  320. }
  321. return 1;
  322. }
  323. /*
  324. * port_uc_addr_set - This function Set the port Unicast address.
  325. */
  326. static void port_uc_addr_set(struct mvgbe_registers *regs, u8 * p_addr)
  327. {
  328. u32 mac_h;
  329. u32 mac_l;
  330. mac_l = (p_addr[4] << 8) | (p_addr[5]);
  331. mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
  332. (p_addr[3] << 0);
  333. MVGBE_REG_WR(regs->macal, mac_l);
  334. MVGBE_REG_WR(regs->macah, mac_h);
  335. /* Accept frames of this address */
  336. port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
  337. }
  338. /*
  339. * mvgbe_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
  340. */
  341. static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
  342. {
  343. struct mvgbe_rxdesc *p_rx_desc;
  344. int i;
  345. /* initialize the Rx descriptors ring */
  346. p_rx_desc = dmvgbe->p_rxdesc;
  347. for (i = 0; i < RINGSZ; i++) {
  348. p_rx_desc->cmd_sts =
  349. MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
  350. p_rx_desc->buf_size = PKTSIZE_ALIGN;
  351. p_rx_desc->byte_cnt = 0;
  352. p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
  353. if (i == (RINGSZ - 1))
  354. p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
  355. else {
  356. p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
  357. ((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
  358. p_rx_desc = p_rx_desc->nxtdesc_p;
  359. }
  360. }
  361. dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
  362. }
/*
 * mvgbe_init - eth_device init callback: bring the port up.
 *
 * Sets up the Rx descriptor ring, clears and unmasks interrupts,
 * programs the DRAM address-decode windows, MAC filtering tables and
 * port serial/SDMA configuration, then enables Rx on queue RXUQ.
 *
 * Returns 0 on success; when MII support and link-down fault echo are
 * compiled in, also waits up to ~5s for link and returns -1 if no
 * link comes up.
 */
static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
	&& defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);
	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);
	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);
	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	/* Tx queue 0 token bucket count and rate/size configuration */
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);
	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));
	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism .
	 */
	MVGBE_REG_WR(regs->pmtu, 0);
	/* Assignment of Rx CRDB of given RXUQ (current Rx descriptor) */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));
#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
	&& defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;
		/* Fetch the PHY address via the "phyadr request" read
		 * (both arguments MV_PHY_ADR_REQUEST) */
		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		/* 1s between link polls */
		udelay(1000000);
	}
	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}
  427. static int mvgbe_halt(struct eth_device *dev)
  428. {
  429. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  430. struct mvgbe_registers *regs = dmvgbe->regs;
  431. /* Disable all gigE address decoder */
  432. MVGBE_REG_WR(regs->bare, 0x3f);
  433. stop_queue(&regs->tqc);
  434. stop_queue(&regs->rqc);
  435. /* Disable port */
  436. MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
  437. /* Set port is not reset */
  438. MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
  439. #ifdef CONFIG_SYS_MII_MODE
  440. /* Set MMI interface up */
  441. MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
  442. #endif
  443. /* Disable & mask ethernet port interrupts */
  444. MVGBE_REG_WR(regs->ic, 0);
  445. MVGBE_REG_WR(regs->ice, 0);
  446. MVGBE_REG_WR(regs->pim, 0);
  447. MVGBE_REG_WR(regs->peim, 0);
  448. return 0;
  449. }
  450. static int mvgbe_write_hwaddr(struct eth_device *dev)
  451. {
  452. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  453. struct mvgbe_registers *regs = dmvgbe->regs;
  454. /* Programs net device MAC address after initialization */
  455. port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
  456. return 0;
  457. }
  458. static int mvgbe_send(struct eth_device *dev, void *dataptr,
  459. int datasize)
  460. {
  461. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  462. struct mvgbe_registers *regs = dmvgbe->regs;
  463. struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
  464. void *p = (void *)dataptr;
  465. u32 cmd_sts;
  466. /* Copy buffer if it's misaligned */
  467. if ((u32) dataptr & 0x07) {
  468. if (datasize > PKTSIZE_ALIGN) {
  469. printf("Non-aligned data too large (%d)\n",
  470. datasize);
  471. return -1;
  472. }
  473. memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
  474. p = dmvgbe->p_aligned_txbuf;
  475. }
  476. p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
  477. p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
  478. p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
  479. p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
  480. p_txdesc->buf_ptr = (u8 *) p;
  481. p_txdesc->byte_cnt = datasize;
  482. /* Set this tc desc as zeroth TXUQ */
  483. MVGBE_REG_WR(regs->tcqdp[TXUQ], (u32) p_txdesc);
  484. /* ensure tx desc writes above are performed before we start Tx DMA */
  485. isb();
  486. /* Apply send command using zeroth TXUQ */
  487. MVGBE_REG_WR(regs->tqc, (1 << TXUQ));
  488. /*
  489. * wait for packet xmit completion
  490. */
  491. cmd_sts = readl(&p_txdesc->cmd_sts);
  492. while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
  493. /* return fail if error is detected */
  494. if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
  495. (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
  496. cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
  497. printf("Err..(%s) in xmit packet\n", __FUNCTION__);
  498. return -1;
  499. }
  500. cmd_sts = readl(&p_txdesc->cmd_sts);
  501. };
  502. return 0;
  503. }
/*
 * mvgbe_recv - eth_device recv callback: receive one frame.
 *
 * Polls (bounded) until the DMA engine releases the current Rx
 * descriptor, drops fragmented or errored frames, passes good frames
 * to the network stack via NetReceive(), then recycles the descriptor
 * back to DMA and advances the ring pointer.
 *
 * Returns 0, or -1 if no packet arrives before the poll bound.
 */
static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	/* wait until rx packet available or timeout */
	/* NOTE(review): MVGBE_PHY_SMI_TIMEOUT is reused here as a plain
	 * iteration bound, not a time unit — presumably "long enough";
	 * confirm against the hardware's worst-case Rx latency. */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __FUNCTION__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);
	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}
	/*
	 * In case received a packet without first/last bits on
	 * OR the error summary bit is on,
	 * the packet needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);
	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
		/* frame spans several descriptors - unsupported, drop */
		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __FUNCTION__);
	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
		printf("Err..(%s) Dropping packet with errors\n",
			__FUNCTION__);
	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (NetReceive)\n", __FUNCTION__);
		/* let the upper layer handle the packet; RX_BUF_OFFSET
		 * skips the hardware-prepended header bytes */
		NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
			(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
	}
	/*
	 * free these descriptors and point next in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;
	/* advance the ring pointer; writel forces the pointer update to
	 * be ordered after the descriptor writes above */
	writel((unsigned)p_rxdesc_curr->nxtdesc_p,
		(u32) &dmvgbe->p_rxdesc_curr);
	return 0;
}
/*
 * mvgbe_initialize - Probe and register every configured mvgbe port.
 *
 * For each port enabled in CONFIG_MVGBE_PORTS: allocates the device
 * state, Rx descriptor ring/buffers and Tx descriptor/bounce buffer,
 * derives the MAC address from the environment (generating a
 * locally-administered one if unset), registers the eth_device and,
 * with MII support, hooks up the SMI miiphy callbacks and programs
 * the PHY address.
 *
 * Returns 0 on success, -1 on allocation failure or invalid devnum.
 */
int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	char *s;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;
	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/* skip if port is configured not to use */
		if (used_ports[devnum] == 0)
			continue;
		dmvgbe = malloc(sizeof(struct mvgbe_device));
		if (!dmvgbe)
			goto error1;
		memset(dmvgbe, 0, sizeof(struct mvgbe_device));
		/* Rx descriptor ring, packet-aligned */
		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);
		if (!dmvgbe->p_rxdesc)
			goto error2;
		/* One PKTSIZE_ALIGN receive buffer per ring slot */
		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);
		if (!dmvgbe->p_rxbuf)
			goto error3;
		/* 8-byte-aligned bounce buffer for misaligned Tx data */
		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
		if (!dmvgbe->p_aligned_txbuf)
			goto error4;
		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
		if (!dmvgbe->p_txdesc) {
			/*
			 * Failure cleanup: the labels below fall through
			 * one another, so jumping to error<N> frees
			 * exactly the allocations made before step N.
			 */
			free(dmvgbe->p_aligned_txbuf);
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__FUNCTION__);
			return -1;
		}
		dev = &dmvgbe->dev;
		/* must be less than NAMESIZE (16) */
		sprintf(dev->name, "egiga%d", devnum);
		/* Extract the MAC address from the environment */
		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			s = "ethaddr";
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			s = "eth1addr";
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__FUNCTION__, devnum);
			return -1;
		}
		while (!eth_getenv_enetaddr(s, dev->enetaddr)) {
			/* Generate Private MAC addr if not set */
			/* 0x02 = locally administered address bit */
			dev->enetaddr[0] = 0x02;
			dev->enetaddr[1] = 0x50;
			dev->enetaddr[2] = 0x43;
#if defined (CONFIG_SKIP_LOCAL_MAC_RANDOMIZATION)
			/* Generate fixed lower MAC half using devnum */
			dev->enetaddr[3] = 0;
			dev->enetaddr[4] = 0;
			dev->enetaddr[5] = devnum;
#else
			/* Generate random lower MAC half */
			dev->enetaddr[3] = get_random_hex();
			dev->enetaddr[4] = get_random_hex();
			dev->enetaddr[5] = get_random_hex();
#endif
			eth_setenv_enetaddr(s, dev->enetaddr);
		}
		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;
		eth_register(dev);
#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, smi_reg_read, smi_reg_write);
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}