cpsw.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011
  1. /*
  2. * CPSW Ethernet Switch Driver
  3. *
  4. * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License as
  8. * published by the Free Software Foundation version 2.
  9. *
  10. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  11. * kind, whether express or implied; without even the implied warranty
  12. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <common.h>
  16. #include <command.h>
  17. #include <net.h>
  18. #include <miiphy.h>
  19. #include <malloc.h>
  20. #include <net.h>
  21. #include <netdev.h>
  22. #include <cpsw.h>
  23. #include <asm/errno.h>
  24. #include <asm/io.h>
  25. #include <phy.h>
  26. #include <asm/arch/cpu.h>
  27. #define BITMASK(bits) (BIT(bits) - 1)
  28. #define PHY_REG_MASK 0x1f
  29. #define PHY_ID_MASK 0x1f
  30. #define NUM_DESCS (PKTBUFSRX * 2)
  31. #define PKT_MIN 60
  32. #define PKT_MAX (1500 + 14 + 4 + 4)
  33. #define CLEAR_BIT 1
  34. #define GIGABITEN BIT(7)
  35. #define FULLDUPLEXEN BIT(0)
  36. #define MIIEN BIT(15)
  37. /* DMA Registers */
  38. #define CPDMA_TXCONTROL 0x004
  39. #define CPDMA_RXCONTROL 0x014
  40. #define CPDMA_SOFTRESET 0x01c
  41. #define CPDMA_RXFREE 0x0e0
  42. #define CPDMA_TXHDP_VER1 0x100
  43. #define CPDMA_TXHDP_VER2 0x200
  44. #define CPDMA_RXHDP_VER1 0x120
  45. #define CPDMA_RXHDP_VER2 0x220
  46. #define CPDMA_TXCP_VER1 0x140
  47. #define CPDMA_TXCP_VER2 0x240
  48. #define CPDMA_RXCP_VER1 0x160
  49. #define CPDMA_RXCP_VER2 0x260
  50. #define CPDMA_RAM_ADDR 0x4a102000
  51. /* Descriptor mode bits */
  52. #define CPDMA_DESC_SOP BIT(31)
  53. #define CPDMA_DESC_EOP BIT(30)
  54. #define CPDMA_DESC_OWNER BIT(29)
  55. #define CPDMA_DESC_EOQ BIT(28)
  56. /*
  57. * This timeout definition is a worst-case ultra defensive measure against
  58. * unexpected controller lock ups. Ideally, we should never ever hit this
  59. * scenario in practice.
  60. */
  61. #define MDIO_TIMEOUT 100 /* msecs */
  62. #define CPDMA_TIMEOUT 100 /* msecs */
/* Memory-mapped register layout of the MDIO controller. */
struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE		BIT(31)	/* state machine idle */
#define CONTROL_ENABLE		BIT(30)	/* enable MDIO state machine */
	u32 alive;
	u32 link;		/* per-PHY link status bitmap */
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];
	/* user access register pairs; zero-length array maps the tail of
	 * the register block (GNU extension, pre-C99 flexible member) */
	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO		BIT(31)	/* start / busy flag */
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)	/* PHY acknowledged the access */
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};
/* CPSW subsystem-level registers. */
struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;	/* per-port statistics enable bitmap */
	u32 ptype;		/* priority type / elevation control */
};
/* Per-slave-port switch registers. */
struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 gap_thresh;
	u32 sa_lo;	/* source MAC address, low 16 bits */
	u32 sa_hi;	/* source MAC address, high 32 bits */
};
/* Host (CPU) port registers. */
struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};
/* Per-port ethernet MAC ("sliver") registers. */
struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;	/* maximum accepted receive frame length */
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};
  126. #define ALE_ENTRY_BITS 68
  127. #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
  128. /* ALE Registers */
  129. #define ALE_CONTROL 0x08
  130. #define ALE_UNKNOWNVLAN 0x18
  131. #define ALE_TABLE_CONTROL 0x20
  132. #define ALE_TABLE 0x34
  133. #define ALE_PORTCTL 0x40
  134. #define ALE_TABLE_WRITE BIT(31)
  135. #define ALE_TYPE_FREE 0
  136. #define ALE_TYPE_ADDR 1
  137. #define ALE_TYPE_VLAN 2
  138. #define ALE_TYPE_VLAN_ADDR 3
  139. #define ALE_UCAST_PERSISTANT 0
  140. #define ALE_UCAST_UNTOUCHED 1
  141. #define ALE_UCAST_OUI 2
  142. #define ALE_UCAST_TOUCHED 3
  143. #define ALE_MCAST_FWD 0
  144. #define ALE_MCAST_BLOCK_LEARN_FWD 1
  145. #define ALE_MCAST_FWD_LEARN 2
  146. #define ALE_MCAST_FWD_2 3
/* Forwarding state programmed into the low 2 bits of ALE_PORTCTL. */
enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};
  153. /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
  154. #define ALE_SECURE 1
  155. #define ALE_BLOCKED 2
/* Runtime state for one slave port. */
struct cpsw_slave {
	struct cpsw_slave_regs	*regs;		/* switch port registers */
	struct cpsw_sliver_regs	*sliver;	/* port MAC registers */
	int			slave_num;
	u32			mac_control;	/* last value written to mac_control */
	struct cpsw_slave_data	*data;		/* platform data for this port */
};
/* CPDMA buffer descriptor; the hw_* fields are read by the DMA engine. */
struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;	/* physical pointer to next descriptor, 0 = end */
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;	/* SOP/EOP/OWNER/EOQ flags plus packet length */
	/* software fields (ignored by hardware) */
	u32 sw_buffer;
	u32 sw_len;
};
/* One DMA channel: descriptor queue plus its control register pointers. */
struct cpdma_chan {
	struct cpdma_desc	*head, *tail;		/* software queue */
	void			*hdp, *cp, *rxfree;	/* hdp/cp/rxfree regs */
};
/* Raw (no-barrier) accessors for descriptor fields in CPPI RAM ... */
#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))
/* ... and for a channel's hdp/cp/rxfree registers */
#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))
/* Iterate over the priv->slaves array (priv->data.slaves entries) */
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)
/* Driver-private state attached to the eth_device. */
struct cpsw_priv {
	struct eth_device		*dev;
	struct cpsw_platform_data	data;		/* copied at register time */
	int				host_port;

	struct cpsw_regs		*regs;
	void				*dma_regs;	/* CPDMA register base */
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;	/* address lookup engine base */

	struct cpdma_desc		*descs;		/* descriptor pool in CPPI RAM */
	struct cpdma_desc		*desc_free;	/* head of free-descriptor list */
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;

	u32				mdio_link;	/* link bitmap at last update */
	u32				phy_mask;	/* bitmap of attached PHY addrs */
};
  203. static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
  204. {
  205. int idx;
  206. idx = start / 32;
  207. start -= idx * 32;
  208. idx = 2 - idx; /* flip */
  209. return (ale_entry[idx] >> start) & BITMASK(bits);
  210. }
  211. static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
  212. u32 value)
  213. {
  214. int idx;
  215. value &= BITMASK(bits);
  216. idx = start / 32;
  217. start -= idx * 32;
  218. idx = 2 - idx; /* flip */
  219. ale_entry[idx] &= ~(BITMASK(bits) << start);
  220. ale_entry[idx] |= (value << start);
  221. }
/* Generate typed get/set helpers for one named ALE entry field. */
#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

/* Field layout of an ALE table entry (bit offset, width) */
DEFINE_ALE_FIELD(entry_type,	60,	2)
DEFINE_ALE_FIELD(mcast_state,	62,	2)
DEFINE_ALE_FIELD(port_mask,	66,	3)
DEFINE_ALE_FIELD(ucast_type,	62,	2)
DEFINE_ALE_FIELD(port_num,	66,	2)
DEFINE_ALE_FIELD(blocked,	65,	1)
DEFINE_ALE_FIELD(secure,	64,	1)
DEFINE_ALE_FIELD(mcast,		40,	1)
  239. /* The MAC address field in the ALE entry cannot be macroized as above */
  240. static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
  241. {
  242. int i;
  243. for (i = 0; i < 6; i++)
  244. addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
  245. }
  246. static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
  247. {
  248. int i;
  249. for (i = 0; i < 6; i++)
  250. cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
  251. }
  252. static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
  253. {
  254. int i;
  255. __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
  256. for (i = 0; i < ALE_ENTRY_WORDS; i++)
  257. ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
  258. return idx;
  259. }
  260. static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
  261. {
  262. int i;
  263. for (i = 0; i < ALE_ENTRY_WORDS; i++)
  264. __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
  265. __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
  266. return idx;
  267. }
  268. static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8* addr)
  269. {
  270. u32 ale_entry[ALE_ENTRY_WORDS];
  271. int type, idx;
  272. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  273. u8 entry_addr[6];
  274. cpsw_ale_read(priv, idx, ale_entry);
  275. type = cpsw_ale_get_entry_type(ale_entry);
  276. if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
  277. continue;
  278. cpsw_ale_get_addr(ale_entry, entry_addr);
  279. if (memcmp(entry_addr, addr, 6) == 0)
  280. return idx;
  281. }
  282. return -ENOENT;
  283. }
  284. static int cpsw_ale_match_free(struct cpsw_priv *priv)
  285. {
  286. u32 ale_entry[ALE_ENTRY_WORDS];
  287. int type, idx;
  288. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  289. cpsw_ale_read(priv, idx, ale_entry);
  290. type = cpsw_ale_get_entry_type(ale_entry);
  291. if (type == ALE_TYPE_FREE)
  292. return idx;
  293. }
  294. return -ENOENT;
  295. }
  296. static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
  297. {
  298. u32 ale_entry[ALE_ENTRY_WORDS];
  299. int type, idx;
  300. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  301. cpsw_ale_read(priv, idx, ale_entry);
  302. type = cpsw_ale_get_entry_type(ale_entry);
  303. if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
  304. continue;
  305. if (cpsw_ale_get_mcast(ale_entry))
  306. continue;
  307. type = cpsw_ale_get_ucast_type(ale_entry);
  308. if (type != ALE_UCAST_PERSISTANT &&
  309. type != ALE_UCAST_OUI)
  310. return idx;
  311. }
  312. return -ENOENT;
  313. }
  314. static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
  315. int port, int flags)
  316. {
  317. u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
  318. int idx;
  319. cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
  320. cpsw_ale_set_addr(ale_entry, addr);
  321. cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
  322. cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
  323. cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
  324. cpsw_ale_set_port_num(ale_entry, port);
  325. idx = cpsw_ale_match_addr(priv, addr);
  326. if (idx < 0)
  327. idx = cpsw_ale_match_free(priv);
  328. if (idx < 0)
  329. idx = cpsw_ale_find_ageable(priv);
  330. if (idx < 0)
  331. return -ENOMEM;
  332. cpsw_ale_write(priv, idx, ale_entry);
  333. return 0;
  334. }
/*
 * Add (or extend) a multicast forwarding entry for @addr.
 * If an entry for the address already exists it is re-read and the new
 * port bits are OR-ed into its current port mask.
 * Returns 0 on success or -ENOMEM if the table is full.
 */
static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	/* merge requested ports with whatever the entry already forwards to */
	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	/* no existing entry: take a free slot, else evict an ageable one */
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}
  357. static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
  358. {
  359. u32 tmp, mask = BIT(bit);
  360. tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
  361. tmp &= ~mask;
  362. tmp |= val ? mask : 0;
  363. __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
  364. }
  365. #define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
  366. #define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
  367. #define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
  368. static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
  369. int val)
  370. {
  371. int offset = ALE_PORTCTL + 4 * port;
  372. u32 tmp, mask = 0x3;
  373. tmp = __raw_readl(priv->ale_regs + offset);
  374. tmp &= ~mask;
  375. tmp |= val & mask;
  376. __raw_writel(tmp, priv->ale_regs + offset);
  377. }
  378. static struct cpsw_mdio_regs *mdio_regs;
  379. /* wait until hardware is ready for another user access */
/*
 * Wait until hardware is ready for another user access: poll the GO bit
 * for up to MDIO_TIMEOUT iterations of 10us, returning the last value
 * read from the access register.
 * NOTE(review): the return type is u32 yet -ETIMEDOUT is returned on
 * timeout; callers cannot distinguish that from register data with high
 * bits set -- confirm whether a real error path is needed.
 */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}
  393. /* wait until hardware state machine is idle */
  394. static inline void wait_for_idle(void)
  395. {
  396. int timeout = MDIO_TIMEOUT;
  397. while (timeout-- &&
  398. ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
  399. udelay(10);
  400. if (timeout == -1)
  401. printf("wait_for_idle Timeout\n");
  402. }
  403. static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
  404. int dev_addr, int phy_reg)
  405. {
  406. unsigned short data;
  407. u32 reg;
  408. if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
  409. return -EINVAL;
  410. wait_for_user_access();
  411. reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
  412. (phy_id << 16));
  413. __raw_writel(reg, &mdio_regs->user[0].access);
  414. reg = wait_for_user_access();
  415. data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
  416. return data;
  417. }
  418. static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
  419. int phy_reg, u16 data)
  420. {
  421. u32 reg;
  422. if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
  423. return -EINVAL;
  424. wait_for_user_access();
  425. reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
  426. (phy_id << 16) | (data & USERACCESS_DATA));
  427. __raw_writel(reg, &mdio_regs->user[0].access);
  428. wait_for_user_access();
  429. return 0;
  430. }
  431. static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
  432. {
  433. struct mii_dev *bus = mdio_alloc();
  434. mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
  435. /* set enable and clock divider */
  436. __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
  437. /*
  438. * wait for scan logic to settle:
  439. * the scan time consists of (a) a large fixed component, and (b) a
  440. * small component that varies with the mii bus frequency. These
  441. * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
  442. * silicon. Since the effect of (b) was found to be largely
  443. * negligible, we keep things simple here.
  444. */
  445. udelay(1000);
  446. bus->read = cpsw_mdio_read;
  447. bus->write = cpsw_mdio_write;
  448. sprintf(bus->name, name);
  449. mdio_register(bus);
  450. }
  451. /* Set a self-clearing bit in a register, and wait for it to clear */
/*
 * Set a self-clearing bit in a register, and wait for it to clear.
 * NOTE(review): this poll is unbounded -- it hangs forever if the
 * hardware never clears the bit (used for soft-reset bits only).
 */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}
  458. #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
  459. ((mac)[2] << 16) | ((mac)[3] << 24))
  460. #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
/* Program the device MAC address into a slave port's sa_hi/sa_lo regs. */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}
/*
 * Refresh one slave port's mac_control from the PHY state.
 * Starts the PHY, reports link via *link, and rewrites mac_control only
 * when the computed value differs from the last one written (logging
 * the transition).
 */
static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	/* nothing changed since the last update: avoid the register write */
	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}
  495. static int cpsw_update_link(struct cpsw_priv *priv)
  496. {
  497. int link = 0;
  498. struct cpsw_slave *slave;
  499. for_each_slave(slave, priv)
  500. cpsw_slave_update_link(slave, priv, &link);
  501. priv->mdio_link = readl(&mdio_regs->link);
  502. return link;
  503. }
  504. static int cpsw_check_link(struct cpsw_priv *priv)
  505. {
  506. u32 link = 0;
  507. link = __raw_readl(&mdio_regs->link) & priv->phy_mask;
  508. if ((link) && (link == priv->mdio_link))
  509. return 1;
  510. return cpsw_update_link(priv);
  511. }
  512. static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
  513. {
  514. if (priv->host_port == 0)
  515. return slave_num + 1;
  516. else
  517. return slave_num;
  518. }
/*
 * Reset and configure one slave port: priority maps, max frame size,
 * MAC address, ALE forwarding state, broadcast mcast entry, and record
 * its PHY in priv->phy_mask.
 */
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0; /* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	/* let broadcasts reach this port */
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_id;
}
  536. static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
  537. {
  538. struct cpdma_desc *desc = priv->desc_free;
  539. if (desc)
  540. priv->desc_free = desc_read_ptr(desc, hw_next);
  541. return desc;
  542. }
  543. static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
  544. {
  545. if (desc) {
  546. desc_write(desc, hw_next, priv->desc_free);
  547. priv->desc_free = desc;
  548. }
  549. }
/*
 * Queue @buffer on DMA channel @chan.
 * Short frames are padded to PKT_MIN via the length field only.
 * When the queue was empty, or the hardware already flagged end-of-queue
 * (EOQ) on the previous tail, the channel is (re)started by writing the
 * head-descriptor pointer. For rx channels, the rxfree count is bumped.
 * Returns 0 on success or -ENOMEM when no descriptor is free.
 */
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}
/*
 * Reap one completed descriptor from @chan, reporting its buffer and the
 * received length (low 11 bits of hw_mode) through the out parameters.
 * If the hardware still owns the head descriptor, returns -EBUSY --
 * kicking a stalled channel by re-writing the head pointer when hdp has
 * gone to zero. Returns -ENOENT on an empty queue, 0 on success (the
 * descriptor is recycled onto the free list and cp is acknowledged).
 */
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		/* channel stalled with work pending: restart it */
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}
		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);
	cpdma_desc_free(priv, desc);
	return 0;
}
  610. static int cpsw_init(struct eth_device *dev, bd_t *bis)
  611. {
  612. struct cpsw_priv *priv = dev->priv;
  613. struct cpsw_slave *slave;
  614. int i, ret;
  615. /* soft reset the controller and initialize priv */
  616. setbit_and_wait_for_clear32(&priv->regs->soft_reset);
  617. /* initialize and reset the address lookup engine */
  618. cpsw_ale_enable(priv, 1);
  619. cpsw_ale_clear(priv, 1);
  620. cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
  621. /* setup host port priority mapping */
  622. __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
  623. __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
  624. /* disable priority elevation and enable statistics on all ports */
  625. __raw_writel(0, &priv->regs->ptype);
  626. /* enable statistics collection only on the host port */
  627. __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
  628. cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
  629. cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
  630. ALE_SECURE);
  631. cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);
  632. for_each_slave(slave, priv)
  633. cpsw_slave_init(slave, priv);
  634. cpsw_update_link(priv);
  635. /* init descriptor pool */
  636. for (i = 0; i < NUM_DESCS; i++) {
  637. desc_write(&priv->descs[i], hw_next,
  638. (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
  639. }
  640. priv->desc_free = &priv->descs[0];
  641. /* initialize channels */
  642. if (priv->data.version == CPSW_CTRL_VERSION_2) {
  643. memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
  644. priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
  645. priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
  646. priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
  647. memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
  648. priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
  649. priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
  650. } else {
  651. memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
  652. priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
  653. priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
  654. priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
  655. memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
  656. priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
  657. priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
  658. }
  659. /* clear dma state */
  660. setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
  661. if (priv->data.version == CPSW_CTRL_VERSION_2) {
  662. for (i = 0; i < priv->data.channels; i++) {
  663. __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
  664. * i);
  665. __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
  666. * i);
  667. __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
  668. * i);
  669. __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
  670. * i);
  671. __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
  672. * i);
  673. }
  674. } else {
  675. for (i = 0; i < priv->data.channels; i++) {
  676. __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
  677. * i);
  678. __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
  679. * i);
  680. __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
  681. * i);
  682. __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
  683. * i);
  684. __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
  685. * i);
  686. }
  687. }
  688. __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
  689. __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
  690. /* submit rx descs */
  691. for (i = 0; i < PKTBUFSRX; i++) {
  692. ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
  693. PKTSIZE);
  694. if (ret < 0) {
  695. printf("error %d submitting rx desc\n", ret);
  696. break;
  697. }
  698. }
  699. return 0;
  700. }
/*
 * eth halt hook: stop both DMA directions, soft-reset the controller and
 * the DMA engine, then power the module down via the platform callback.
 */
static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	priv->data.control(0);
}
/*
 * eth send hook: reap completed tx descriptors, then queue @packet.
 * Returns 0 on success, -EIO when no link, -ETIMEDOUT if the reap loop
 * spins too long, or the cpdma_submit() error code.
 */
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	if (!cpsw_check_link(priv))
		return -EIO;

	/* make sure the packet is in memory before the DMA engine reads it */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}
/*
 * eth recv hook: drain every completed rx descriptor, hand each frame to
 * the network stack, and immediately re-post the buffer. Always returns 0.
 */
static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;

	cpsw_update_link(priv);

	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
		/* discard stale cache lines before reading DMA-written data */
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + PKTSIZE_ALIGN);
		NetReceive(buffer, len);
		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
	}

	return 0;
}
  746. static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
  747. struct cpsw_priv *priv)
  748. {
  749. void *regs = priv->regs;
  750. struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
  751. slave->slave_num = slave_num;
  752. slave->data = data;
  753. slave->regs = regs + data->slave_reg_ofs;
  754. slave->sliver = regs + data->sliver_reg_ofs;
  755. }
  756. static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
  757. {
  758. struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
  759. struct phy_device *phydev;
  760. u32 supported = (SUPPORTED_10baseT_Half |
  761. SUPPORTED_10baseT_Full |
  762. SUPPORTED_100baseT_Half |
  763. SUPPORTED_100baseT_Full |
  764. SUPPORTED_1000baseT_Full);
  765. phydev = phy_connect(priv->bus,
  766. CONFIG_PHY_ADDR,
  767. dev,
  768. slave->data->phy_if);
  769. phydev->supported &= supported;
  770. phydev->advertising = phydev->supported;
  771. priv->phydev = phydev;
  772. phy_config(phydev);
  773. return 1;
  774. }
/*
 * Instantiate and register a cpsw eth_device from @data.
 * Allocates the private state, maps the register sub-blocks, registers
 * the device with the net core, brings up the MDIO bus, and connects a
 * PHY for each slave. Returns 1 on success or -ENOMEM on allocation
 * failure (previously allocated pieces are freed).
 */
int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct cpsw_slave *slave;
	void *regs = (void *)data->cpsw_base;
	struct eth_device *dev;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data;	/* keep a private copy of the platform data */
	priv->dev = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	/* descriptor pool lives in dedicated CPPI RAM, not the heap */
	priv->descs = (void *)CPDMA_RAM_ADDR;
	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;

	int idx = 0;
	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase	= 0;
	dev->init	= cpsw_init;
	dev->halt	= cpsw_halt;
	dev->send	= cpsw_send;
	dev->recv	= cpsw_recv;
	dev->priv	= priv;

	eth_register(dev);

	/* MDIO bus shares the device name so it can be looked up below */
	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_each_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}