/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/gpio.h>
#include <asm/atomic.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");
static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);
MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.1"
/* frame size + 802.1q tag */
#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
#define CPMAC_QUEUES 8
/* Ethernet registers */
#define CPMAC_TX_CONTROL 0x0004
#define CPMAC_TX_TEARDOWN 0x0008
#define CPMAC_RX_CONTROL 0x0014
#define CPMAC_RX_TEARDOWN 0x0018
#define CPMAC_MBP 0x0100
# define MBP_RXPASSCRC 0x40000000
# define MBP_RXQOS 0x20000000
# define MBP_RXNOCHAIN 0x10000000
# define MBP_RXCMF 0x01000000
# define MBP_RXSHORT 0x00800000
# define MBP_RXCEF 0x00400000
# define MBP_RXPROMISC 0x00200000
# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
# define MBP_RXBCAST 0x00002000
# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
# define MBP_RXMCAST 0x00000020
# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE 0x0104
#define CPMAC_UNICAST_CLEAR 0x0108
#define CPMAC_MAX_LENGTH 0x010c
#define CPMAC_BUFFER_OFFSET 0x0110
#define CPMAC_MAC_CONTROL 0x0160
# define MAC_TXPTYPE 0x00000200
# define MAC_TXPACE 0x00000040
# define MAC_MII 0x00000020
# define MAC_TXFLOW 0x00000010
# define MAC_RXFLOW 0x00000008
# define MAC_MTEST 0x00000004
# define MAC_LOOPBACK 0x00000002
# define MAC_FDX 0x00000001
#define CPMAC_MAC_STATUS 0x0164
# define MAC_STATUS_QOS 0x00000004
# define MAC_STATUS_RXFLOW 0x00000002
# define MAC_STATUS_TXFLOW 0x00000001
#define CPMAC_TX_INT_ENABLE 0x0178
#define CPMAC_TX_INT_CLEAR 0x017c
#define CPMAC_MAC_INT_VECTOR 0x0180
# define MAC_INT_STATUS 0x00080000
# define MAC_INT_HOST 0x00040000
# define MAC_INT_RX 0x00020000
# define MAC_INT_TX 0x00010000
#define CPMAC_MAC_EOI_VECTOR 0x0184
#define CPMAC_RX_INT_ENABLE 0x0198
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
#define CPMAC_MAC_HASH_HI 0x01dc
#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
#define CPMAC_REG_END 0x0680

/*
 * Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD 0x0200
#define CPMAC_STATS_RX_BCAST 0x0204
#define CPMAC_STATS_RX_MCAST 0x0208
#define CPMAC_STATS_RX_PAUSE 0x020c
#define CPMAC_STATS_RX_CRC 0x0210
#define CPMAC_STATS_RX_ALIGN 0x0214
#define CPMAC_STATS_RX_OVER 0x0218
#define CPMAC_STATS_RX_JABBER 0x021c
#define CPMAC_STATS_RX_UNDER 0x0220
#define CPMAC_STATS_RX_FRAG 0x0224
#define CPMAC_STATS_RX_FILTER 0x0228
#define CPMAC_STATS_RX_QOSFILTER 0x022c
#define CPMAC_STATS_RX_OCTETS 0x0230
#define CPMAC_STATS_TX_GOOD 0x0234
#define CPMAC_STATS_TX_BCAST 0x0238
#define CPMAC_STATS_TX_MCAST 0x023c
#define CPMAC_STATS_TX_PAUSE 0x0240
#define CPMAC_STATS_TX_DEFER 0x0244
#define CPMAC_STATS_TX_COLLISION 0x0248
#define CPMAC_STATS_TX_SINGLECOLL 0x024c
#define CPMAC_STATS_TX_MULTICOLL 0x0250
#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
#define CPMAC_STATS_TX_LATECOLL 0x0258
#define CPMAC_STATS_TX_UNDERRUN 0x025c
#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
#define CPMAC_STATS_TX_OCTETS 0x0264

#define cpmac_read(base, reg)		(readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val)	(writel(val, (void __iomem *)(base) + \
					 (reg)))
/* MDIO bus */
#define CPMAC_MDIO_VERSION 0x0000
#define CPMAC_MDIO_CONTROL 0x0004
# define MDIOC_IDLE 0x80000000
# define MDIOC_ENABLE 0x40000000
# define MDIOC_PREAMBLE 0x00100000
# define MDIOC_FAULT 0x00080000
# define MDIOC_FAULTDETECT 0x00040000
# define MDIOC_INTTEST 0x00020000
# define MDIOC_CLKDIV(div) ((div) & 0xff)
#define CPMAC_MDIO_ALIVE 0x0008
#define CPMAC_MDIO_LINK 0x000c
#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
# define MDIO_BUSY 0x80000000
# define MDIO_WRITE 0x40000000
# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
# define MDIO_DATA(data) ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
# define PHYSEL_LINKSEL 0x00000040
# define PHYSEL_LINKINT 0x00000020
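
/*
 * DMA descriptor.  The first four words (hw_next, hw_data, and the
 * buflen/bufflags and datalen/dataflags pairs) form the CPPI-style buffer
 * descriptor that the CPMAC DMA engine reads and writes, so hw_next and
 * hw_data carry physical (DMA) addresses.  The remaining fields are
 * driver-private bookkeeping: skb for the attached buffer, next/prev for
 * the software view of the ring, and mapping/data_mapping for the DMA
 * addresses of the descriptor itself and of skb->data.
 */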
struct cpmac_desc {
	u32 hw_next;
	u32 hw_data;
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;
#define CPMAC_SOP 0x8000
#define CPMAC_EOP 0x4000
#define CPMAC_OWN 0x2000
#define CPMAC_EOQ 0x1000
	struct sk_buff *skb;
	struct cpmac_desc *next;
	struct cpmac_desc *prev;
	dma_addr_t mapping;
	dma_addr_t data_mapping;
};
struct cpmac_priv {
	spinlock_t lock;
	spinlock_t rx_lock;
	struct cpmac_desc *rx_head;
	int ring_size;
	struct cpmac_desc *desc_ring;
	dma_addr_t dma_ring;
	void __iomem *regs;
	struct mii_bus *mii_bus;
	struct phy_device *phy;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;
	u32 msg_enable;
	struct net_device *dev;
	struct work_struct reset_work;
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;
};
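
/*
 * Locking, as used below: "lock" serialises tx statistics updates and the
 * link-state bookkeeping in cpmac_adjust_link(); "rx_lock" protects the rx
 * descriptor ring shared between cpmac_poll() and the reset worker; and
 * reset_pending, bumped from poll, timeout and error-interrupt paths, makes
 * cpmac_start_xmit() return NETDEV_TX_BUSY while a hardware reset is in
 * flight.
 */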
static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);

	for (i = 0; i < CPMAC_REG_END; i += 4) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
			       priv->regs + i);
		}
		printk(" %08x", cpmac_read(priv->regs, i));
	}
	printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
	int i;

	printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
	for (i = 0; i < sizeof(*desc) / 4; i++)
		printk(" %08x", ((u32 *)desc)[i]);
	printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *dump = priv->rx_head;

	do {
		cpmac_dump_desc(dev, dump);
		dump = dump->next;
	} while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
	for (i = 0; i < skb->len; i++) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: data[%p]:", dev->name,
			       skb->data + i);
		}
		printk(" %02x", ((u8 *)skb->data)[i]);
	}
	printk("\n");
}
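
/*
 * MDIO access, as implemented below: setting MDIO_BUSY along with the
 * register and PHY fields (plus MDIO_WRITE and the data for writes) starts
 * a transaction on channel 0; the hardware keeps MDIO_BUSY asserted until
 * the frame has completed, which is why both helpers spin with cpu_relax().
 * A read then returns the 16 data bits from the same access register.
 */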
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	u32 val;

	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
		    MDIO_PHY(phy_id));
	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
		cpu_relax();
	return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
	return 0;
}
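
/*
 * Note on the divider below: MDIOC_CLKDIV(div) yields an MDIO clock of
 * ar7_cpmac_freq() / (div + 1), so the magic 2200000 appears to target a
 * bus clock of roughly 2.2 MHz.  That target is an inference from the
 * constant, not documented behaviour.
 */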
static int cpmac_mdio_reset(struct mii_bus *bus)
{
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
		    MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));
	return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus *cpmac_mii;

static int cpmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr)
		return -EOPNOTSUPP;

	/* ignore other fields */
	return 0;
}
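
/*
 * The hardware multicast filter is a 64-bit hash table indexed by a 6-bit
 * value that cpmac_set_multicast_list() computes by xor-folding shifted
 * bytes of the MAC address (not the usual crc32 scheme).  Worked example,
 * following the code below by hand: for 01:00:5e:00:00:01 the per-byte
 * contributions are 0x10 ^ 0x00 ^ 0x5f ^ 0x00 ^ 0x00 ^ 0x01 = 0x4e, masked
 * to 6 bits = 14, so bit 14 of CPMAC_MAC_HASH_LO gets set for that address.
 */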
static void cpmac_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *iter;
	int i;
	u8 tmp;
	u32 mbp, bit, hash[2] = { 0, };
	struct cpmac_priv *priv = netdev_priv(dev);

	mbp = cpmac_read(priv->regs, CPMAC_MBP);
	if (dev->flags & IFF_PROMISC) {
		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
			    MBP_RXPROMISC);
	} else {
		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
		if (dev->flags & IFF_ALLMULTI) {
			/* enable all multicast mode */
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
		} else {
			/*
			 * cpmac uses some strange mac address hashing
			 * (not crc32)
			 */
			for (i = 0, iter = dev->mc_list; i < dev->mc_count;
			     i++, iter = iter->next) {
				bit = 0;
				tmp = iter->dmi_addr[0];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = iter->dmi_addr[1];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = iter->dmi_addr[2];
				bit ^= (tmp >> 6) ^ tmp;
				tmp = iter->dmi_addr[3];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = iter->dmi_addr[4];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = iter->dmi_addr[5];
				bit ^= (tmp >> 6) ^ tmp;
				bit &= 0x3f;
				hash[bit / 32] |= 1 << (bit % 32);
			}
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
		}
	}
}
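
/*
 * Receive one packet: a replacement buffer is allocated *before* the filled
 * skb is handed up the stack, so a descriptor is never left without a
 * buffer.  If the allocation fails, the old skb stays in the descriptor
 * and the packet is counted as dropped instead.
 */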
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
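
/*
 * NAPI poll.  Besides harvesting completed descriptors, this has to cope
 * with the receiver stopping on an EOQ (end-of-queue) marker whenever the
 * driver could not refill hw_next in time: the poll loop remembers the
 * descriptor after the one carrying EOQ and, once ring space has been
 * freed, restarts the receiver from it by writing CPMAC_RX_PTR(0).
 */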
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
					       " duplicate EOQ: %p and %p\n",
					       priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */
	if (!restart &&
	    (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
	    == CPMAC_EOQ &&
	    (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
				       "restart rx from a descriptor that's "
				       "not free: %p\n",
				       priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);

	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */
	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
		       "Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
		       priv->dev->name,
		       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
		       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}
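
/*
 * Transmit path.  Each of the CPMAC_QUEUES hardware tx channels is backed
 * by exactly one descriptor (desc_ring[0..CPMAC_QUEUES-1]), so a subqueue
 * is stopped as soon as a packet is queued on it and is woken again from
 * cpmac_end_xmit() when the completion interrupt acknowledges it.
 */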
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	desc = &priv->desc_ring[queue];
	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
	if (likely(desc->skb)) {
		spin_lock(&priv->lock);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->skb->len;
		spin_unlock(&priv->lock);
		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
				 DMA_TO_DEVICE);

		if (unlikely(netif_msg_tx_done(priv)))
			printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
			       desc->skb, desc->skb->len);

		dev_kfree_skb_irq(desc->skb);
		desc->skb = NULL;
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	} else {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: end_xmit: spurious interrupt\n", dev->name);
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}
}
static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}
static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;

	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				printk(KERN_WARNING "%s: packet dropped\n",
				       dev->name);
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int i;

	if (unlikely(!priv->desc_ring))
		return;
	for (i = 0; i < CPMAC_QUEUES; i++) {
		priv->desc_ring[i].dataflags = 0;
		if (priv->desc_ring[i].skb) {
			dev_kfree_skb_any(priv->desc_ring[i].skb);
			priv->desc_ring[i].skb = NULL;
		}
	}
}
static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
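
/*
 * CPMAC_MAC_STATUS decoding, matching the shifts below: bits 8-10 give the
 * rx channel, bits 12-15 the rx host-error code, bits 16-18 the tx channel
 * and bits 20-23 the tx host-error code.  Any non-zero code is treated as
 * fatal: the tx queues are stopped, the MAC is halted and reset_work is
 * scheduled to rebuild the rings and restart the hardware.
 */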
static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* Can't find any documentation on what these
			 * error codes actually are.  So just log them
			 * and hope...
			 */
			if (rx_code)
				printk(KERN_WARNING "%s: host error %d on rx "
				       "channel %d (macstatus %08x), resetting\n",
				       dev->name, rx_code, rx_channel, macstatus);
			if (tx_code)
				printk(KERN_WARNING "%s: host error %d on tx "
				       "channel %d (macstatus %08x), resetting\n",
				       dev->name, tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		printk(KERN_WARNING "%s: transmit timeout\n", dev->name);

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}
static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!(netif_running(dev)))
		return -EINVAL;
	if (!priv->phy)
		return -EINVAL;
	if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
	    (cmd == SIOCSMIIREG))
		return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);

	return -EOPNOTSUPP;
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (priv->phy)
		return phy_ethtool_gset(priv->phy, cmd);

	return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (priv->phy)
		return phy_ethtool_sset(priv->phy, cmd);

	return -EINVAL;
}

static void cpmac_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	ring->rx_max_pending = 1024;
	ring->rx_mini_max_pending = 1;
	ring->rx_jumbo_max_pending = 1;
	ring->tx_max_pending = 1;

	ring->rx_pending = priv->ring_size;
	ring->rx_mini_pending = 1;
	ring->rx_jumbo_pending = 1;
	ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev))
		return -EBUSY;
	priv->ring_size = ring->rx_pending;
	return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "cpmac");
	strcpy(info->version, CPMAC_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "%s", "cpmac");
	info->regdump_len = 0;
}

static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_settings = cpmac_get_settings,
	.set_settings = cpmac_set_settings,
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
};
static void cpmac_adjust_link(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int new_state = 0;

	spin_lock(&priv->lock);
	if (priv->phy->link) {
		netif_tx_start_all_queues(dev);
		if (priv->phy->duplex != priv->oldduplex) {
			new_state = 1;
			priv->oldduplex = priv->phy->duplex;
		}

		if (priv->phy->speed != priv->oldspeed) {
			new_state = 1;
			priv->oldspeed = priv->phy->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv) && net_ratelimit())
		phy_print_status(priv->phy);

	spin_unlock(&priv->lock);
}
static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	/* resource_size() includes the inclusive end address; the old
	 * mem->end - mem->start was off by one byte */
	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to request registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, resource_size(mem));
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to remap registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_remap;
	}

	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}
	priv->rx_head->prev->hw_next = (u32)0;

	res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
	if (res) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to obtain irq\n",
			       dev->name);
		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	priv->phy->state = PHY_CHANGELINK;
	phy_start(priv->phy);

	return 0;

fail_irq:
fail_desc:
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
	/* the ring was allocated with dma_alloc_coherent(), so kfree()
	 * would be wrong here */
	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
			  priv->desc_ring, priv->dma_ring);
fail_alloc:
	iounmap(priv->regs);
fail_remap:
	release_mem_region(mem->start, resource_size(mem));
fail_reserve:
	return res;
}
static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(priv->phy);

	cpmac_hw_stop(dev);

	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	/* must match the resource_size() used in cpmac_open() */
	release_mem_region(mem->start, resource_size(mem));
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);
	return 0;
}

static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open		= cpmac_open,
	.ndo_stop		= cpmac_stop,
	.ndo_start_xmit		= cpmac_start_xmit,
	.ndo_tx_timeout		= cpmac_tx_timeout,
	.ndo_set_multicast_list	= cpmac_set_multicast_list,
	.ndo_do_ioctl		= cpmac_ioctl,
	.ndo_set_config		= cpmac_config,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
static int external_switch;

static int __devinit cpmac_probe(struct platform_device *pdev)
{
	int rc, phy_id;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	struct resource *mem;
	struct cpmac_priv *priv;
	struct net_device *dev;
	struct plat_cpmac_data *pdata;

	pdata = pdev->dev.platform_data;

	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	} else {
		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
			if (!(pdata->phy_mask & (1 << phy_id)))
				continue;
			if (!cpmac_mii->phy_map[phy_id])
				continue;
			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
			break;
		}
	}

	if (phy_id == PHY_MAX_ADDR) {
		dev_err(&pdev->dev, "no PHY present\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
	if (!dev) {
		printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, dev);
	priv = netdev_priv(dev);

	priv->pdev = pdev;
	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!mem) {
		rc = -ENODEV;
		goto fail;
	}

	dev->irq = platform_get_irq_byname(pdev, "irq");

	dev->netdev_ops = &cpmac_netdev_ops;
	dev->ethtool_ops = &cpmac_ethtool_ops;

	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->rx_lock);
	priv->dev = dev;
	priv->ring_size = 64;
	priv->msg_enable = netif_msg_init(debug_level, 0xff);
	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

	snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
		 mdio_bus_id, phy_id);

	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	if (IS_ERR(priv->phy)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Could not attach to PHY\n",
			       dev->name);
		/* don't leak the net_device on the error path */
		rc = PTR_ERR(priv->phy);
		goto fail;
	}

	rc = register_netdev(dev);
	if (rc) {
		printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
		       dev->name);
		goto fail;
	}

	if (netif_msg_probe(priv)) {
		printk(KERN_INFO
		       "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
		       "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
		       priv->phy_name, dev->dev_addr);
	}

	return 0;

fail:
	free_netdev(dev);
	return rc;
}
static int __devexit cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	return 0;
}

static struct platform_driver cpmac_driver = {
	.driver.name = "cpmac",
	.driver.owner = THIS_MODULE,
	.probe = cpmac_probe,
	.remove = __devexit_p(cpmac_remove),
};
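
/*
 * Bus-level init: after resetting the MDIO block, poll CPMAC_MDIO_ALIVE
 * for up to ~3 seconds (300 x 10 ms) to see which PHY addresses respond.
 * If more than one address is alive, the board is assumed to have an
 * external switch hanging off the bus; the mask is then cleared so that
 * cpmac_probe() falls back to the fixed-PHY path.
 */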
int __devinit cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;
	cpmac_mii->irq = mii_irqs;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		printk(KERN_ERR "Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

#warning FIXME: unhardcode gpio&reset bits
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	for (i = 0; i < 300; i++) {
		mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
		if (mask)
			break;
		msleep(10);
	}

	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}
void __devexit cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	/* unmap before freeing the bus; the previous order read
	 * cpmac_mii->priv after mdiobus_free() had released it */
	iounmap(cpmac_mii->priv);
	mdiobus_free(cpmac_mii);
}

module_init(cpmac_init);
module_exit(cpmac_exit);