bcm63xx_enet.c

/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

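/*
 * The MDIO data register packs a full clause 22 management frame: a
 * 2-bit opcode (the ENET_MIIDATA_OP_* masks), the PHY address, the
 * register number, the 0x2 turnaround pattern and 16 bits of data,
 * each placed with the ENET_MIIDATA_*_SHIFT values used by the
 * read/write callbacks below.
 */
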
/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;

			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

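/*
 * Descriptor handoff works through the len_stat word: the wmb()
 * above makes sure the buffer address is visible before
 * DMADESC_OWNER_MASK flips ownership to the hardware, and
 * DMADESC_WRAP_MASK on the last entry tells the engine to wrap back
 * to the ring base.
 */
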
/*
 * timer callback to defer refilling the rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx((struct net_device *)data);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		dev->last_rx = jiffies;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}

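/*
 * copybreak trade-off: frames shorter than copybreak (128 bytes by
 * default) are copied into a small freshly allocated skb so the
 * full-size rx buffer can stay mapped in the ring; larger frames are
 * unmapped and handed up as-is, and the next refill allocates a
 * replacement buffer for that slot.
 */
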
/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			priv->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

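/*
 * NAPI flow for the poll callback below: the dma interrupt handler
 * masked the rx/tx PKTDONE interrupts before scheduling us, so poll
 * runs with those sources quiet.  It only calls napi_complete() and
 * unmasks the two interrupts again once the rx ring is drained and
 * tx reclaim found no more work; until then it returns and stays on
 * the poll list.
 */
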
/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop the queue before it gets full */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	priv->stats.tx_bytes += skb->len;
	priv->stats.tx_packets++;
	dev->trans_start = jiffies;

	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

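/*
 * Two barriers bracket the ownership flip above: the first wmb()
 * orders the address/length stores before len_stat hands the
 * descriptor to the engine, the second orders that handoff before
 * the CHANCFG doorbell that restarts the channel.
 */
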
/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct dev_mc_list *mc_list;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * our own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(mc_list, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = mc_list->dmi_addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

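/*
 * Each perfect match slot is a register pair: ENET_PML holds the low
 * four bytes of the address, ENET_PMH the top two plus the DATAVALID
 * bit that arms the slot.  Slot 0 carries the unicast address, which
 * leaves slots 1-3 for multicast filtering before the driver falls
 * back to all-multicast mode.
 */
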
/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");
		pr_cont("\n");
	}
}

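/*
 * Pause resolution: phylib sets phydev->pause once both link
 * partners advertised it, so the first branch enables flow control
 * in both directions; with autonegotiated pause disabled, the user's
 * forced rx/tx pause settings are applied instead.
 */
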
/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	/* only disconnect if we actually attached a PHY above */
	if (priv->has_phy)
		phy_disconnect(priv->phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

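/*
 * Both disable paths use the same handshake: write the disable bit
 * (or clear the channel enable below), then poll until the hardware
 * clears it to signal that it has gone idle, bounded by a ~1ms
 * busy-wait.
 */
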
/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

	limit = 1000;
	do {
		u32 val;

		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
		if (!(val & ENETDMA_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * core request to return device rx/tx stats
 */
static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return &priv->stats;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
		     offsetof(struct bcm_enet_priv, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", GEN_STAT(stats.rx_packets), -1 },
	{ "tx_packets", GEN_STAT(stats.tx_packets), -1 },
	{ "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
	{ "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
	{ "rx_errors", GEN_STAT(stats.rx_errors), -1 },
	{ "tx_errors", GEN_STAT(stats.tx_errors), -1 },
	{ "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
	{ "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	\
	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		p = (char *)priv + s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		cmd->autoneg = 0;
		cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.get_settings		= bcm_enet_get_settings,
	.set_settings		= bcm_enet_set_settings,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * calculate actual hardware mtu
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  BCMENET_DMA_MAXBURST * 4);
	return 0;
}

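/*
 * Worked example, assuming the BCMENET_DMA_MAXBURST value of 16 from
 * bcm63xx_enet.h: for the standard 1500 byte mtu, hw_mtu becomes
 * 1500 + VLAN_ETH_HLEN (18) = 1518, and rx_skb_size is
 * ALIGN(1518 + 4, 16 * 4) = 1536 bytes per receive buffer.
 */
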
/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (ret)
		return ret;

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

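/*
 * preinit runs at probe time so the mii bus can be scanned while the
 * interface is still down: the mac is reset, the internal or
 * external mii interface is selected, the mdc clock divider is
 * programmed and mib counters are set to clear on read.
 */
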
  1318. static const struct net_device_ops bcm_enet_ops = {
  1319. .ndo_open = bcm_enet_open,
  1320. .ndo_stop = bcm_enet_stop,
  1321. .ndo_start_xmit = bcm_enet_start_xmit,
  1322. .ndo_get_stats = bcm_enet_get_stats,
  1323. .ndo_set_mac_address = bcm_enet_set_mac_address,
  1324. .ndo_set_multicast_list = bcm_enet_set_multicast_list,
  1325. .ndo_do_ioctl = bcm_enet_ioctl,
  1326. .ndo_change_mtu = bcm_enet_change_mtu,
  1327. #ifdef CONFIG_NET_POLL_CONTROLLER
  1328. .ndo_poll_controller = bcm_enet_netpoll,
  1329. #endif
  1330. };
  1331. /*
  1332. * allocate netdevice, request register memory and register device.
  1333. */
  1334. static int __devinit bcm_enet_probe(struct platform_device *pdev)
  1335. {
  1336. struct bcm_enet_priv *priv;
  1337. struct net_device *dev;
  1338. struct bcm63xx_enet_platform_data *pd;
  1339. struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
  1340. struct mii_bus *bus;
  1341. const char *clk_name;
  1342. unsigned int iomem_size;
  1343. int i, ret;
  1344. /* stop if shared driver failed, assume driver->probe will be
  1345. * called in the same order we register devices (correct ?) */
  1346. if (!bcm_enet_shared_base)
  1347. return -ENODEV;
  1348. res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1349. res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  1350. res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
  1351. res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
  1352. if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
  1353. return -ENODEV;
  1354. ret = 0;
  1355. dev = alloc_etherdev(sizeof(*priv));
  1356. if (!dev)
  1357. return -ENOMEM;
  1358. priv = netdev_priv(dev);
  1359. memset(priv, 0, sizeof(*priv));
  1360. ret = compute_hw_mtu(priv, dev->mtu);
  1361. if (ret)
  1362. goto out;
  1363. iomem_size = res_mem->end - res_mem->start + 1;
  1364. if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
  1365. ret = -EBUSY;
  1366. goto out;
  1367. }
  1368. priv->base = ioremap(res_mem->start, iomem_size);
  1369. if (priv->base == NULL) {
  1370. ret = -ENOMEM;
  1371. goto out_release_mem;
  1372. }
  1373. dev->irq = priv->irq = res_irq->start;
  1374. priv->irq_rx = res_irq_rx->start;
  1375. priv->irq_tx = res_irq_tx->start;
  1376. priv->mac_id = pdev->id;
  1377. /* get rx & tx dma channel id for this mac */
  1378. if (priv->mac_id == 0) {
  1379. priv->rx_chan = 0;
  1380. priv->tx_chan = 1;
  1381. clk_name = "enet0";
  1382. } else {
  1383. priv->rx_chan = 2;
  1384. priv->tx_chan = 3;
  1385. clk_name = "enet1";
  1386. }

	priv->mac_clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	clk_enable(priv->mac_clk);

	/* initialize defaults and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
	}

	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_put_clk_mac;
		}
		clk_enable(priv->phy_clk);
	}
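	/* (the mac_id check above suggests the integrated PHY is only
	 * wired to mac 0, so only that mac ever takes the ephy clock) */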

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%d", priv->mac_id);

		/* only probe the bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if no slave is present on the hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
		if (!bus->irq) {
			ret = -ENOMEM;
			goto out_free_mdio;
		}

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
		else
			bus->irq[priv->phy_id] = PHY_POLL;
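		/* (PHY_POLL makes phylib poll the PHY for link changes
		 * instead of waiting for an interrupt) */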

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* init the mib update lock & work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));
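	/* (with RDCLEAR set in hw_preinit, subsequent reads of these
	 * registers also clear them) */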

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
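	/* (16 is the NAPI weight, i.e. the packet budget handed to
	 * bcm_enet_poll() on each softirq pass) */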

	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
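	/* (carrier stays off until link handling brings the link up
	 * after the device is opened) */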
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;
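
/* error paths: release resources in reverse order of acquisition */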
out_unregister_mdio:
	if (priv->mii_bus) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
	}

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}

out_put_clk_mac:
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, iomem_size);
out:
	free_netdev(dev);
	return ret;
}

/*
 * exit func, stops hardware and unregisters netdevice
 */
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = pdev->dev.platform_data;
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= __devexit_p(bcm_enet_remove),
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};

/*
 * reserve & remap memory space shared between all macs
 */
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int iomem_size;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem_size = res->end - res->start + 1;
	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
		return -EBUSY;

	bcm_enet_shared_base = ioremap(res->start, iomem_size);
	if (!bcm_enet_shared_base) {
		release_mem_region(res->start, iomem_size);
		return -ENOMEM;
	}
	return 0;
}

static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
	struct resource *res;

	iounmap(bcm_enet_shared_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);
	return 0;
}

/*
 * this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= __devexit_p(bcm_enet_shared_remove),
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner	= THIS_MODULE,
	},
};

/*
 * entry point
 */
static int __init bcm_enet_init(void)
{
	int ret;
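
	/* register the shared (DMA register) driver first:
	 * bcm_enet_probe() bails out with -ENODEV while
	 * bcm_enet_shared_base is still unmapped */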
	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&bcm63xx_enet_driver);
	if (ret)
		platform_driver_unregister(&bcm63xx_enet_shared_driver);

	return ret;
}

static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");