/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>

#define BITMASK(bits)		(BIT(bits) - 1)
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f
#define NUM_DESCS		(PKTBUFSRX * 2)
#define PKT_MIN			60
#define PKT_MAX			(1500 + 14 + 4 + 4)
#define CLEAR_BIT		1
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)

/* DMA Registers */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260
#define CPDMA_RAM_ADDR		0x4a102000

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */

struct cpsw_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)

	u32	alive;
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	__reserved_1[20];

	struct {
		u32	access;
		u32	physel;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	gap_thresh;
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE		0
#define ALE_TYPE_ADDR		1
#define ALE_TYPE_VLAN		2
#define ALE_TYPE_VLAN_ADDR	3

#define ALE_UCAST_PERSISTANT	0
#define ALE_UCAST_UNTOUCHED	1
#define ALE_UCAST_OUI		2
#define ALE_UCAST_TOUCHED	3

#define ALE_MCAST_FWD			0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN		2
#define ALE_MCAST_FWD_2			3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2

struct cpsw_slave {
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
};

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_chan {
	struct cpdma_desc	*head, *tail;
	void			*hdp, *cp, *rxfree;
};

#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
			(priv)->data.slaves; slave++)

struct cpsw_priv {
	struct eth_device		*dev;
	struct cpsw_platform_data	data;
	int				host_port;

	struct cpsw_regs		*regs;
	void				*dma_regs;
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;

	struct cpdma_desc		*descs;
	struct cpdma_desc		*desc_free;
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;
};

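/*
 * ALE table entries are 68 bits wide and are read/written through the
 * ALE_TABLE registers as three 32-bit words.  Bit 0 of an entry lives in
 * the last of those words, hence the "2 - idx" flip below when mapping a
 * bit offset onto a word index.
 */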
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx    = start / 32;
	start -= idx * 32;
	idx    = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx    = start / 32;
	start -= idx * 32;
	idx    = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |=  (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
{									\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
}									\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
{									\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\
}

DEFINE_ALE_FIELD(entry_type,	60,	2)
DEFINE_ALE_FIELD(mcast_state,	62,	2)
DEFINE_ALE_FIELD(port_mask,	66,	3)
DEFINE_ALE_FIELD(ucast_type,	62,	2)
DEFINE_ALE_FIELD(port_num,	66,	2)
DEFINE_ALE_FIELD(blocked,	65,	1)
DEFINE_ALE_FIELD(secure,	64,	1)
DEFINE_ALE_FIELD(mcast,		40,	1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

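/*
 * When installing an address, reuse an existing entry for the same MAC if
 * there is one; otherwise take the first free slot, and as a last resort
 * evict an ageable (non-persistent, non-OUI) unicast entry.
 */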
static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp  = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv,  2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp  = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

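/*
 * MDIO transactions go through the USERACCESS register: write the GO bit
 * together with the direction, register and PHY address (plus data for a
 * write), then poll until the hardware clears GO.  On reads, the ACK bit
 * indicates that the returned data field is valid.
 */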
static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}

static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	unsigned short data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency.  These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon.  Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}

static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_each_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);
	return link;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}

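/*
 * Queue a buffer on a DMA channel.  The new descriptor is chained onto the
 * tail of the channel; if the controller had already stopped (EOQ set on
 * the previous tail), the head descriptor pointer is rewritten to restart
 * it.  For the rx channel, the free-buffer count is also bumped.
 */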
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

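/*
 * Reap the descriptor at the head of a channel.  Returns -EBUSY while the
 * hardware still owns it (restarting the channel if it stalled), otherwise
 * hands the buffer and length back to the caller and recycles the
 * descriptor.
 */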
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}
		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv	*priv = dev->priv;
	struct cpsw_slave	*slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
			   ALE_SECURE);
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);

	for_each_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i + 1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp	= priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp	= priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree	= priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp	= priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp	= priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp	= priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp	= priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree	= priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp	= priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp	= priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
				     4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
				     4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	priv->data.control(0);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	if (!cpsw_update_link(priv))
		return -EIO;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;

	cpsw_update_link(priv);

	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + PKTSIZE_ALIGN);
		NetReceive(buffer, len);
		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
	}

	return 0;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void			*regs = priv->regs;
	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data	 = data;
	slave->regs	 = regs + data->slave_reg_ofs;
	slave->sliver	 = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
{
	struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
	struct phy_device *phydev;
	u32 supported = (SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Full);

	phydev = phy_connect(priv->bus,
			     CONFIG_PHY_ADDR,
			     dev,
			     slave->data->phy_if);

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;

	phy_config(phydev);

	return 1;
}

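/*
 * Board-level entry point: allocates the eth_device and driver state from
 * the supplied platform data, maps the host port, CPDMA and ALE register
 * blocks, registers the device with the network core, brings up the MDIO
 * bus and connects a PHY for each slave port.
 */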
int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv	*priv;
	struct cpsw_slave	*slave;
	void			*regs = (void *)data->cpsw_base;
	struct eth_device	*dev;
	int			idx = 0;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data;
	priv->dev  = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	priv->descs		= (void *)CPDMA_RAM_ADDR;
	priv->host_port		= data->host_port_num;
	priv->regs		= regs;
	priv->host_port_regs	= regs + data->host_port_reg_ofs;
	priv->dma_regs		= regs + data->cpdma_reg_ofs;
	priv->ale_regs		= regs + data->ale_reg_ofs;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase	= 0;
	dev->init	= cpsw_init;
	dev->halt	= cpsw_halt;
	dev->send	= cpsw_send;
	dev->recv	= cpsw_recv;
	dev->priv	= priv;

	eth_register(dev);

	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_each_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}