/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
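
/*
 * Note: 'copybreak' can only be set at module load time (perm is 0, so
 * there is no sysfs entry), e.g. "modprobe bcm63xx_enet copybreak=256".
 * Received frames shorter than this threshold are copied into a freshly
 * allocated skb so the original DMA buffer can stay in the rx ring.
 */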

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
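
/*
 * do_mdio_op() returns 0 when the MII bit asserted within the ~1ms
 * busy-wait budget (1000 x udelay(1)), and 1 on timeout; callers treat
 * any non-zero return as a failed MDIO transaction.
 */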

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}
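
/*
 * The two wrapper pairs above expose the same MDIO primitives through
 * two front ends: the phylib mii_bus callbacks (used when a PHY is
 * attached, see bcm_enet_probe) and the legacy mii core callbacks
 * (used by the ioctl path and the platform mii_config hook when no
 * PHY is attached).
 */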

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;

			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx((struct net_device *)data);
	spin_unlock(&priv->rx_lock);
}
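
/*
 * When an skb allocation fails the hole stays in the ring (the rx_skb[]
 * slot is left NULL) and, once the ring runs completely dry, refill is
 * retried from the rx_timeout timer above one second later (jiffies +
 * HZ), under the same rx_lock as the NAPI receive path.
 */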

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}
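
/*
 * The copybreak path above trades one memcpy for keeping the original
 * DMA mapping alive: short frames (< copybreak bytes) are duplicated
 * into a small skb and the ring buffer is rearmed as-is, while larger
 * frames are unmapped and handed up, leaving a NULL rx_skb[] slot for
 * bcm_enet_refill_rx() to repopulate.
 */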

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
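
/*
 * 'force' is only set by bcm_enet_stop(): once the tx DMA channel has
 * been disabled, descriptors still marked as hardware-owned will never
 * complete, so they are reclaimed unconditionally.
 */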

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}
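
/*
 * Standard NAPI contract: while work remains (rx budget exhausted, or
 * tx reclaim made progress) the poll function returns without calling
 * napi_complete(), so the core polls again; only when both queues are
 * idle does it re-enable the PKTDONE interrupts that
 * bcm_enet_isr_dma() masked off.
 */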

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop the queue as soon as it fills up */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
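
/*
 * Each skb consumes exactly one descriptor (the hardware appends the
 * CRC, per DMADESC_APPEND_CRC). The ownership bit is written last,
 * behind a wmb(), so the DMA engine never sees a half-initialized
 * descriptor; the queue is stopped as soon as the last free descriptor
 * is consumed and woken again from tx reclaim.
 */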

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}
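
/*
 * Each perfect match entry is split across two registers: PML holds the
 * low four bytes of the address, PMH the top two bytes plus the
 * DATAVALID bit that activates the entry. Entry 0 is the interface's
 * own address; entries 1-3 are used for multicast filtering below.
 */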

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");
		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

	limit = 1000;
	do {
		u32 val;

		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
		if (!(val & ENETDMA_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),	\
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	\
	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
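
/*
 * The hardware MIB counters are configured as read-to-clear in
 * bcm_enet_hw_preinit() (ENET_MIBCTL_RDCLEAR_MASK), so each read above
 * fetches the delta since the previous read and accumulates it into
 * the software copy; the unused counters are read purely to clear them
 * so the "about to overflow" interrupt condition goes away.
 */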

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		cmd->autoneg = 0;
		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
					    ? SPEED_100 : SPEED_10));
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}
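
/*
 * New ring sizes only take effect when the rings are reallocated, so
 * if the interface was running it is stopped and reopened here; on
 * reopen failure the device is closed instead of being left
 * half-configured.
 */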

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.get_settings		= bcm_enet_get_settings,
	.set_settings		= bcm_enet_set_settings,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * calculate actual hardware mtu
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  BCMENET_DMA_MAXBURST * 4);
	return 0;
}
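
/*
 * Worked example (assuming BCMENET_DMA_MAXBURST is 16, as defined in
 * bcm63xx_enet.h): for the standard 1500-byte MTU, actual_mtu becomes
 * 1500 + VLAN_ETH_HLEN (18) = 1518, and rx_skb_size is
 * ALIGN(1518 + ETH_FCS_LEN (4), 16 * 4) = ALIGN(1522, 64) = 1536.
 */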

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (ret)
		return ret;

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_multicast_list	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_enet_netpoll,
#endif
};
  1330. /*
  1331. * allocate netdevice, request register memory and register device.
  1332. */
  1333. static int __devinit bcm_enet_probe(struct platform_device *pdev)
  1334. {
  1335. struct bcm_enet_priv *priv;
  1336. struct net_device *dev;
  1337. struct bcm63xx_enet_platform_data *pd;
  1338. struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
  1339. struct mii_bus *bus;
  1340. const char *clk_name;
  1341. unsigned int iomem_size;
  1342. int i, ret;
  1343. /* stop if shared driver failed, assume driver->probe will be
  1344. * called in the same order we register devices (correct ?) */
  1345. if (!bcm_enet_shared_base)
  1346. return -ENODEV;
  1347. res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1348. res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  1349. res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
  1350. res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
  1351. if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
  1352. return -ENODEV;
  1353. ret = 0;
  1354. dev = alloc_etherdev(sizeof(*priv));
  1355. if (!dev)
  1356. return -ENOMEM;
  1357. priv = netdev_priv(dev);
  1358. ret = compute_hw_mtu(priv, dev->mtu);
  1359. if (ret)
  1360. goto out;
  1361. iomem_size = resource_size(res_mem);
  1362. if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
  1363. ret = -EBUSY;
  1364. goto out;
  1365. }
  1366. priv->base = ioremap(res_mem->start, iomem_size);
  1367. if (priv->base == NULL) {
  1368. ret = -ENOMEM;
  1369. goto out_release_mem;
  1370. }
  1371. dev->irq = priv->irq = res_irq->start;
  1372. priv->irq_rx = res_irq_rx->start;
  1373. priv->irq_tx = res_irq_tx->start;
  1374. priv->mac_id = pdev->id;
  1375. /* get rx & tx dma channel id for this mac */
  1376. if (priv->mac_id == 0) {
  1377. priv->rx_chan = 0;
  1378. priv->tx_chan = 1;
  1379. clk_name = "enet0";
  1380. } else {
  1381. priv->rx_chan = 2;
  1382. priv->tx_chan = 3;
  1383. clk_name = "enet1";
  1384. }

	priv->mac_clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	clk_enable(priv->mac_clk);

	/* initialize defaults, then fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
	}
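
	/*
	 * for reference, a minimal sketch of the platform data a board
	 * file might pass in (illustrative values, not from this file):
	 *
	 *	static struct bcm63xx_enet_platform_data enet0_pd = {
	 *		.has_phy          = 1,
	 *		.use_internal_phy = 1,
	 *		.phy_id           = 1,
	 *		.pause_auto       = 1,
	 *	};
	 */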

	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_put_clk_mac;
		}
		clk_enable(priv->phy_clk);
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {
		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%d", priv->mac_id);

		/* only probe the address where we think the PHY is,
		 * because the mdio read operation returns 0 instead of
		 * 0xffff when no slave is present on the hw */
		bus->phy_mask = ~(1 << priv->phy_id);
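
		/*
		 * e.g. with phy_id == 1 the mask is ~0x2, so address 1
		 * is the only one mdiobus_register() will scan.
		 */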

		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
		if (!bus->irq) {
			ret = -ENOMEM;
			goto out_free_mdio;
		}
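
		/*
		 * one irq slot exists per possible PHY address; PHY_POLL
		 * below tells phylib to poll for link changes instead of
		 * relying on an interrupt.
		 */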
		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
		else
			bus->irq[priv->phy_id] = PHY_POLL;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}
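
	/*
	 * pd->mii_config is a board-supplied hook: it is invoked with
	 * enable == 1 here and with enable == 0 from bcm_enet_remove().
	 */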

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* init the mib update lock & work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;
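
	/*
	 * error path: the labels fall through, releasing resources in
	 * reverse order of acquisition.
	 */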

out_unregister_mdio:
	if (priv->mii_bus) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
	}

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}

out_put_clk_mac:
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, iomem_size);
out:
	free_netdev(dev);
	return ret;
}

/*
 * exit func, stops hardware and unregisters netdevice
 */
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = pdev->dev.platform_data;
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= __devexit_p(bcm_enet_remove),
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};

/*
 * reserve & remap memory space shared between all macs
 */
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int iomem_size;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem_size = resource_size(res);
	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
		return -EBUSY;

	bcm_enet_shared_base = ioremap(res->start, iomem_size);
	if (!bcm_enet_shared_base) {
		release_mem_region(res->start, iomem_size);
		return -ENOMEM;
	}
	return 0;
}

static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
	struct resource *res;

	iounmap(bcm_enet_shared_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	return 0;
}

/*
 * this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= __devexit_p(bcm_enet_shared_remove),
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner	= THIS_MODULE,
	},
};
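
/*
 * the shared driver is registered first so that, with probes run in
 * registration order, bcm_enet_shared_base is already mapped when
 * bcm_enet_probe() checks it.
 */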

/*
 * entry point
 */
static int __init bcm_enet_init(void)
{
	int ret;

	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&bcm63xx_enet_driver);
	if (ret)
		platform_driver_unregister(&bcm63xx_enet_shared_driver);

	return ret;
}

static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");