mac-scc.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550
  1. /*
  2. * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
  3. *
  4. * Copyright (c) 2003 Intracom S.A.
  5. * by Pantelis Antoniou <panto@intracom.gr>
  6. *
  7. * 2005 (c) MontaVista Software, Inc.
  8. * Vitaly Bordug <vbordug@ru.mvista.com>
  9. *
  10. * This file is licensed under the terms of the GNU General Public License
  11. * version 2. This program is licensed "as is" without any warranty of any
  12. * kind, whether express or implied.
  13. */
  14. #include <linux/module.h>
  15. #include <linux/kernel.h>
  16. #include <linux/types.h>
  17. #include <linux/string.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/errno.h>
  20. #include <linux/ioport.h>
  21. #include <linux/slab.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/init.h>
  24. #include <linux/delay.h>
  25. #include <linux/netdevice.h>
  26. #include <linux/etherdevice.h>
  27. #include <linux/skbuff.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/mii.h>
  30. #include <linux/ethtool.h>
  31. #include <linux/bitops.h>
  32. #include <linux/fs.h>
  33. #include <linux/platform_device.h>
  34. #include <asm/irq.h>
  35. #include <asm/uaccess.h>
  36. #ifdef CONFIG_8xx
  37. #include <asm/8xx_immap.h>
  38. #include <asm/pgtable.h>
  39. #include <asm/mpc8xx.h>
  40. #include <asm/commproc.h>
  41. #endif
  42. #ifdef CONFIG_PPC_CPM_NEW_BINDING
  43. #include <asm/of_platform.h>
  44. #endif
  45. #include "fs_enet.h"
  46. /*************************************************/
  47. #if defined(CONFIG_CPM1)
  48. /* for a 8xx __raw_xxx's are sufficient */
  49. #define __fs_out32(addr, x) __raw_writel(x, addr)
  50. #define __fs_out16(addr, x) __raw_writew(x, addr)
  51. #define __fs_out8(addr, x) __raw_writeb(x, addr)
  52. #define __fs_in32(addr) __raw_readl(addr)
  53. #define __fs_in16(addr) __raw_readw(addr)
  54. #define __fs_in8(addr) __raw_readb(addr)
  55. #else
  56. /* for others play it safe */
  57. #define __fs_out32(addr, x) out_be32(addr, x)
  58. #define __fs_out16(addr, x) out_be16(addr, x)
  59. #define __fs_in32(addr) in_be32(addr)
  60. #define __fs_in16(addr) in_be16(addr)
  61. #endif
  62. /* write, read, set bits, clear bits */
  63. #define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
  64. #define R32(_p, _m) __fs_in32(&(_p)->_m)
  65. #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
  66. #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
  67. #define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
  68. #define R16(_p, _m) __fs_in16(&(_p)->_m)
  69. #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
  70. #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
  71. #define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
  72. #define R8(_p, _m) __fs_in8(&(_p)->_m)
  73. #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
  74. #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
  75. #define SCC_MAX_MULTICAST_ADDRS 64
  76. /*
  77. * Delay to wait for SCC reset command to complete (in us)
  78. */
  79. #define SCC_RESET_DELAY 50
  80. #define MAX_CR_CMD_LOOPS 10000
  81. static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
  82. {
  83. const struct fs_platform_info *fpi = fep->fpi;
  84. int i;
  85. W16(cpmp, cp_cpcr, fpi->cp_command | CPM_CR_FLG | (op << 8));
  86. for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
  87. if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
  88. return 0;
  89. printk(KERN_ERR "%s(): Not able to issue CPM command\n",
  90. __FUNCTION__);
  91. return 1;
  92. }
  93. static int do_pd_setup(struct fs_enet_private *fep)
  94. {
  95. #ifdef CONFIG_PPC_CPM_NEW_BINDING
  96. struct of_device *ofdev = to_of_device(fep->dev);
  97. fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
  98. if (fep->interrupt == NO_IRQ)
  99. return -EINVAL;
  100. fep->scc.sccp = of_iomap(ofdev->node, 0);
  101. if (!fep->scc.sccp)
  102. return -EINVAL;
  103. fep->scc.ep = of_iomap(ofdev->node, 1);
  104. if (!fep->scc.ep) {
  105. iounmap(fep->scc.sccp);
  106. return -EINVAL;
  107. }
  108. #else
  109. struct platform_device *pdev = to_platform_device(fep->dev);
  110. struct resource *r;
  111. /* Fill out IRQ field */
  112. fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
  113. if (fep->interrupt < 0)
  114. return -EINVAL;
  115. r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
  116. fep->scc.sccp = ioremap(r->start, r->end - r->start + 1);
  117. if (fep->scc.sccp == NULL)
  118. return -EINVAL;
  119. r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
  120. fep->scc.ep = ioremap(r->start, r->end - r->start + 1);
  121. if (fep->scc.ep == NULL)
  122. return -EINVAL;
  123. #endif
  124. return 0;
  125. }
  126. #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
  127. #define SCC_RX_EVENT (SCCE_ENET_RXF)
  128. #define SCC_TX_EVENT (SCCE_ENET_TXB)
  129. #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
  130. static int setup_data(struct net_device *dev)
  131. {
  132. struct fs_enet_private *fep = netdev_priv(dev);
  133. #ifdef CONFIG_PPC_CPM_NEW_BINDING
  134. struct fs_platform_info *fpi = fep->fpi;
  135. fep->scc.idx = fs_get_scc_index(fpi->fs_no);
  136. if ((unsigned int)fep->fcc.idx >= 4) /* max 4 SCCs */
  137. return -EINVAL;
  138. fpi->cp_command = fep->fcc.idx << 6;
  139. #endif
  140. do_pd_setup(fep);
  141. fep->scc.hthi = 0;
  142. fep->scc.htlo = 0;
  143. fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
  144. fep->ev_rx = SCC_RX_EVENT;
  145. fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
  146. fep->ev_err = SCC_ERR_EVENT_MSK;
  147. return 0;
  148. }
  149. static int allocate_bd(struct net_device *dev)
  150. {
  151. struct fs_enet_private *fep = netdev_priv(dev);
  152. const struct fs_platform_info *fpi = fep->fpi;
  153. fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
  154. sizeof(cbd_t), 8);
  155. if (IS_ERR_VALUE(fep->ring_mem_addr))
  156. return -ENOMEM;
  157. fep->ring_base = (void __iomem __force*)
  158. cpm_dpram_addr(fep->ring_mem_addr);
  159. return 0;
  160. }
  161. static void free_bd(struct net_device *dev)
  162. {
  163. struct fs_enet_private *fep = netdev_priv(dev);
  164. if (fep->ring_base)
  165. cpm_dpfree(fep->ring_mem_addr);
  166. }
/* Counterpart of setup_data(); nothing to undo for the SCC. */
static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}
  171. static void set_promiscuous_mode(struct net_device *dev)
  172. {
  173. struct fs_enet_private *fep = netdev_priv(dev);
  174. scc_t __iomem *sccp = fep->scc.sccp;
  175. S16(sccp, scc_psmr, SCC_PSMR_PRO);
  176. }
  177. static void set_multicast_start(struct net_device *dev)
  178. {
  179. struct fs_enet_private *fep = netdev_priv(dev);
  180. scc_enet_t __iomem *ep = fep->scc.ep;
  181. W16(ep, sen_gaddr1, 0);
  182. W16(ep, sen_gaddr2, 0);
  183. W16(ep, sen_gaddr3, 0);
  184. W16(ep, sen_gaddr4, 0);
  185. }
  186. static void set_multicast_one(struct net_device *dev, const u8 * mac)
  187. {
  188. struct fs_enet_private *fep = netdev_priv(dev);
  189. scc_enet_t __iomem *ep = fep->scc.ep;
  190. u16 taddrh, taddrm, taddrl;
  191. taddrh = ((u16) mac[5] << 8) | mac[4];
  192. taddrm = ((u16) mac[3] << 8) | mac[2];
  193. taddrl = ((u16) mac[1] << 8) | mac[0];
  194. W16(ep, sen_taddrh, taddrh);
  195. W16(ep, sen_taddrm, taddrm);
  196. W16(ep, sen_taddrl, taddrl);
  197. scc_cr_cmd(fep, CPM_CR_SET_GADDR);
  198. }
  199. static void set_multicast_finish(struct net_device *dev)
  200. {
  201. struct fs_enet_private *fep = netdev_priv(dev);
  202. scc_t __iomem *sccp = fep->scc.sccp;
  203. scc_enet_t __iomem *ep = fep->scc.ep;
  204. /* clear promiscuous always */
  205. C16(sccp, scc_psmr, SCC_PSMR_PRO);
  206. /* if all multi or too many multicasts; just enable all */
  207. if ((dev->flags & IFF_ALLMULTI) != 0 ||
  208. dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
  209. W16(ep, sen_gaddr1, 0xffff);
  210. W16(ep, sen_gaddr2, 0xffff);
  211. W16(ep, sen_gaddr3, 0xffff);
  212. W16(ep, sen_gaddr4, 0xffff);
  213. }
  214. }
  215. static void set_multicast_list(struct net_device *dev)
  216. {
  217. struct dev_mc_list *pmc;
  218. if ((dev->flags & IFF_PROMISC) == 0) {
  219. set_multicast_start(dev);
  220. for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
  221. set_multicast_one(dev, pmc->dmi_addr);
  222. set_multicast_finish(dev);
  223. } else
  224. set_promiscuous_mode(dev);
  225. }
/*
 * This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 *
 * NOTE: the write ordering below follows the documented SCC Ethernet
 * init sequence (parameter RAM first, then INIT_TRX, then SCC mode
 * registers, enabling ENR/ENT last) — do not reorder.
 */
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	scc_enet_t __iomem *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	/* disable RX/TX before touching parameter RAM */
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((u8 __iomem *)ep + i, 0);

	/* point to bds: RX ring first in DPRAM, TX ring right after it */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size. It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask (standard Ethernet CRC-32).
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables (group and individual address filters).
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* set address: station address is stored 16 bits at a time,
	 * low-order byte in the low half of each register
	 */
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	W16(ep, sen_pper, 0);	/* persistence: disabled */
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	/* (re)initialize the software BD rings */
	fs_init_bds(dev);

	/* tell the CPM to initialize its RX/TX parameters from pram */
	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	/* clear any stale events before unmasking interrupts */
	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode. Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	/* NOTE(review): SCC_PSMR_LPB (loopback) is set together with FDE
	 * here — this matches the historical in-tree code, but verify it
	 * is intentional for full-duplex operation. */
	if (fep->phydev->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* finally, enable receiver and transmitter */
	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
/*
 * Stop the SCC: wait briefly, mask all SCC interrupts, disable the
 * receiver/transmitter, and tear down the BD rings.
 */
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t __iomem *sccp = fep->scc.sccp;
	int i;

	/* NOTE(review): this polls while scc_sccm (the interrupt MASK
	 * register) reads zero, up to SCC_RESET_DELAY us. A graceful-stop
	 * wait would more typically poll a GRA event in scc_scce; this
	 * matches the historical code — confirm the intent before
	 * changing. */
	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s SCC timeout on graceful transmit stop\n",
		       dev->name);

	/* mask all SCC interrupts */
	W16(sccp, scc_sccm, 0);
	/* disable receiver and transmitter */
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	fs_cleanup_bds(dev);
}
/*
 * Called before request_irq(): on non-merged 8xx kernels, program the
 * SIU interrupt edge/level register so this IRQ triggers correctly.
 */
static void pre_request_irq(struct net_device *dev, int irq)
{
#ifndef CONFIG_PPC_MERGE
	immap_t *immap = fs_enet_immap;
	u32 siel;

	/* SIU interrupt */
	if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
		siel = in_be32(&immap->im_siu_conf.sc_siel);
		if ((irq & 1) == 0)
			/* even IRQ numbers are external lines: edge-sensitive */
			siel |= (0x80000000 >> irq);
		else
			/* odd IRQ numbers are internal levels: level-sensitive */
			siel &= ~(0x80000000 >> (irq & ~1));
		out_be32(&immap->im_siu_conf.sc_siel, siel);
	}
#endif
}
/* Called after free_irq(); no SIU state needs to be undone for the SCC. */
static void post_free_irq(struct net_device *dev, int irq)
{
	/* nothing */
}
  353. static void napi_clear_rx_event(struct net_device *dev)
  354. {
  355. struct fs_enet_private *fep = netdev_priv(dev);
  356. scc_t __iomem *sccp = fep->scc.sccp;
  357. W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
  358. }
  359. static void napi_enable_rx(struct net_device *dev)
  360. {
  361. struct fs_enet_private *fep = netdev_priv(dev);
  362. scc_t __iomem *sccp = fep->scc.sccp;
  363. S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
  364. }
  365. static void napi_disable_rx(struct net_device *dev)
  366. {
  367. struct fs_enet_private *fep = netdev_priv(dev);
  368. scc_t __iomem *sccp = fep->scc.sccp;
  369. C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
  370. }
/* Hook called after RX BDs are replenished; the SCC needs no kick. */
static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}
/* Hook called after queuing TX BDs; the SCC polls BDs on its own. */
static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}
  379. static u32 get_int_events(struct net_device *dev)
  380. {
  381. struct fs_enet_private *fep = netdev_priv(dev);
  382. scc_t __iomem *sccp = fep->scc.sccp;
  383. return (u32) R16(sccp, scc_scce);
  384. }
  385. static void clear_int_events(struct net_device *dev, u32 int_events)
  386. {
  387. struct fs_enet_private *fep = netdev_priv(dev);
  388. scc_t __iomem *sccp = fep->scc.sccp;
  389. W16(sccp, scc_scce, int_events & 0xffff);
  390. }
/* Log error-class interrupt events (TXE/BSY) reported by the SCC. */
static void ev_error(struct net_device *dev, u32 int_events)
{
	printk(KERN_WARNING DRV_MODULE_NAME
	       ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
}
/*
 * ethtool get-regs: copy the SCC register block followed by a slice of
 * the Ethernet parameter RAM into the caller's buffer.
 *
 * Returns 0 on success, -EINVAL if the buffer (*sizep) is too small.
 */
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
		return -EINVAL;

	memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
	p = (char *)p + sizeof(scc_t);

	/* NOTE(review): this copies sizeof(scc_enet_t __iomem *) — the
	 * size of a POINTER (4/8 bytes) — not sizeof(scc_enet_t). It
	 * matches get_regs_len() so it is internally consistent, but it
	 * looks like the intent was the full parameter RAM; confirm
	 * before changing (both sites would need updating together). */
	memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

	return 0;
}
/* ethtool get-regs-len: must match exactly what get_regs() writes
 * (register block plus the same pointer-sized pram slice). */
static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}
  410. static void tx_restart(struct net_device *dev)
  411. {
  412. struct fs_enet_private *fep = netdev_priv(dev);
  413. scc_cr_cmd(fep, CPM_CR_RESTART_TX);
  414. }
  415. /*************************************************************************/
/*************************************************************************/

/* MAC-specific operations table plugged into the generic fs_enet core
 * for SCC-based Ethernet. */
const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.pre_request_irq	= pre_request_irq,
	.post_free_irq		= post_free_irq,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};