cpmac.c

/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/gpio.h>
#include <asm/atomic.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.1"

/* frame size + 802.1q tag */
#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
#define CPMAC_QUEUES 8

/* Ethernet registers */
#define CPMAC_TX_CONTROL 0x0004
#define CPMAC_TX_TEARDOWN 0x0008
#define CPMAC_RX_CONTROL 0x0014
#define CPMAC_RX_TEARDOWN 0x0018
#define CPMAC_MBP 0x0100
# define MBP_RXPASSCRC 0x40000000
# define MBP_RXQOS 0x20000000
# define MBP_RXNOCHAIN 0x10000000
# define MBP_RXCMF 0x01000000
# define MBP_RXSHORT 0x00800000
# define MBP_RXCEF 0x00400000
# define MBP_RXPROMISC 0x00200000
# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
# define MBP_RXBCAST 0x00002000
# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
# define MBP_RXMCAST 0x00000020
# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE 0x0104
#define CPMAC_UNICAST_CLEAR 0x0108
#define CPMAC_MAX_LENGTH 0x010c
#define CPMAC_BUFFER_OFFSET 0x0110
#define CPMAC_MAC_CONTROL 0x0160
# define MAC_TXPTYPE 0x00000200
# define MAC_TXPACE 0x00000040
# define MAC_MII 0x00000020
# define MAC_TXFLOW 0x00000010
# define MAC_RXFLOW 0x00000008
# define MAC_MTEST 0x00000004
# define MAC_LOOPBACK 0x00000002
# define MAC_FDX 0x00000001
#define CPMAC_MAC_STATUS 0x0164
# define MAC_STATUS_QOS 0x00000004
# define MAC_STATUS_RXFLOW 0x00000002
# define MAC_STATUS_TXFLOW 0x00000001
#define CPMAC_TX_INT_ENABLE 0x0178
#define CPMAC_TX_INT_CLEAR 0x017c
#define CPMAC_MAC_INT_VECTOR 0x0180
# define MAC_INT_STATUS 0x00080000
# define MAC_INT_HOST 0x00040000
# define MAC_INT_RX 0x00020000
# define MAC_INT_TX 0x00010000
#define CPMAC_MAC_EOI_VECTOR 0x0184
#define CPMAC_RX_INT_ENABLE 0x0198
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
#define CPMAC_MAC_HASH_HI 0x01dc
#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
#define CPMAC_REG_END 0x0680

/*
 * Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD 0x0200
#define CPMAC_STATS_RX_BCAST 0x0204
#define CPMAC_STATS_RX_MCAST 0x0208
#define CPMAC_STATS_RX_PAUSE 0x020c
#define CPMAC_STATS_RX_CRC 0x0210
#define CPMAC_STATS_RX_ALIGN 0x0214
#define CPMAC_STATS_RX_OVER 0x0218
#define CPMAC_STATS_RX_JABBER 0x021c
#define CPMAC_STATS_RX_UNDER 0x0220
#define CPMAC_STATS_RX_FRAG 0x0224
#define CPMAC_STATS_RX_FILTER 0x0228
#define CPMAC_STATS_RX_QOSFILTER 0x022c
#define CPMAC_STATS_RX_OCTETS 0x0230
#define CPMAC_STATS_TX_GOOD 0x0234
#define CPMAC_STATS_TX_BCAST 0x0238
#define CPMAC_STATS_TX_MCAST 0x023c
#define CPMAC_STATS_TX_PAUSE 0x0240
#define CPMAC_STATS_TX_DEFER 0x0244
#define CPMAC_STATS_TX_COLLISION 0x0248
#define CPMAC_STATS_TX_SINGLECOLL 0x024c
#define CPMAC_STATS_TX_MULTICOLL 0x0250
#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
#define CPMAC_STATS_TX_LATECOLL 0x0258
#define CPMAC_STATS_TX_UNDERRUN 0x025c
#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
#define CPMAC_STATS_TX_OCTETS 0x0264

#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
					    (reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION 0x0000
#define CPMAC_MDIO_CONTROL 0x0004
# define MDIOC_IDLE 0x80000000
# define MDIOC_ENABLE 0x40000000
# define MDIOC_PREAMBLE 0x00100000
# define MDIOC_FAULT 0x00080000
# define MDIOC_FAULTDETECT 0x00040000
# define MDIOC_INTTEST 0x00020000
# define MDIOC_CLKDIV(div) ((div) & 0xff)
#define CPMAC_MDIO_ALIVE 0x0008
#define CPMAC_MDIO_LINK 0x000c
#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
# define MDIO_BUSY 0x80000000
# define MDIO_WRITE 0x40000000
# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
# define MDIO_DATA(data) ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
# define PHYSEL_LINKSEL 0x00000040
# define PHYSEL_LINKINT 0x00000020

struct cpmac_desc {
	u32 hw_next;
	u32 hw_data;
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;
#define CPMAC_SOP 0x8000
#define CPMAC_EOP 0x4000
#define CPMAC_OWN 0x2000
#define CPMAC_EOQ 0x1000
	struct sk_buff *skb;
	struct cpmac_desc *next;
	struct cpmac_desc *prev;
	dma_addr_t mapping;
	dma_addr_t data_mapping;
};
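/*
 * Layout notes (inferred from how the fields are used below): hw_next and
 * hw_data are the physical addresses the DMA engine follows, so they always
 * mirror the 'mapping' of the next descriptor and the 'data_mapping' of the
 * buffer.  A descriptor is handed to the hardware by setting CPMAC_OWN in
 * dataflags; the hardware clears it on completion and sets CPMAC_EOQ on the
 * descriptor where it stopped because hw_next was zero.  Tx frames always
 * fit in a single buffer, so CPMAC_SOP and CPMAC_EOP are set together.
 */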
struct cpmac_priv {
	spinlock_t lock;
	spinlock_t rx_lock;
	struct cpmac_desc *rx_head;
	int ring_size;
	struct cpmac_desc *desc_ring;
	dma_addr_t dma_ring;
	void __iomem *regs;
	struct mii_bus *mii_bus;
	struct phy_device *phy;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;
	u32 msg_enable;
	struct net_device *dev;
	struct work_struct reset_work;
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);

	for (i = 0; i < CPMAC_REG_END; i += 4) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
			       priv->regs + i);
		}
		printk(" %08x", cpmac_read(priv->regs, i));
	}
	printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
	int i;

	printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
	for (i = 0; i < sizeof(*desc) / 4; i++)
		printk(" %08x", ((u32 *)desc)[i]);
	printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *dump = priv->rx_head;

	do {
		cpmac_dump_desc(dev, dump);
		dump = dump->next;
	} while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
	for (i = 0; i < skb->len; i++) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: data[%p]:", dev->name,
			       skb->data + i);
		}
		printk(" %02x", ((u8 *)skb->data)[i]);
	}
	printk("\n");
}
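/*
 * MDIO bus accessors.  A transaction is started by writing MDIO_BUSY plus
 * the register/phy selectors (and, for writes, the data) to the channel-0
 * ACCESS register; the controller clears MDIO_BUSY when it has finished
 * and, for reads, leaves the result in the low 16 bits of the same register.
 */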
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	u32 val;

	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
		    MDIO_PHY(phy_id));
	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
		cpu_relax();
	return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
	return 0;
}

static int cpmac_mdio_reset(struct mii_bus *bus)
{
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
		    MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));
	return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus *cpmac_mii;

static int cpmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr)
		return -EOPNOTSUPP;

	/* ignore other fields */
	return 0;
}

static void cpmac_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *iter;
	u8 tmp;
	u32 mbp, bit, hash[2] = { 0, };
	struct cpmac_priv *priv = netdev_priv(dev);

	mbp = cpmac_read(priv->regs, CPMAC_MBP);
	if (dev->flags & IFF_PROMISC) {
		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
			    MBP_RXPROMISC);
	} else {
		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
		if (dev->flags & IFF_ALLMULTI) {
			/* enable all multicast mode */
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
		} else {
			/*
			 * cpmac uses some strange mac address hashing
			 * (not crc32)
			 */
			netdev_for_each_mc_addr(iter, dev) {
				bit = 0;
				tmp = iter->dmi_addr[0];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = iter->dmi_addr[1];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = iter->dmi_addr[2];
				bit ^= (tmp >> 6) ^ tmp;
				tmp = iter->dmi_addr[3];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = iter->dmi_addr[4];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = iter->dmi_addr[5];
				bit ^= (tmp >> 6) ^ tmp;
				bit &= 0x3f;
				hash[bit / 32] |= 1 << (bit % 32);
			}
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
		}
	}
}
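/*
 * Receive one completed descriptor: ack it to the hardware, hand the filled
 * skb up the stack and replace it with a freshly mapped buffer so the
 * descriptor can be returned to the DMA engine (CPMAC_OWN).  If no
 * replacement skb can be allocated, the old buffer is recycled and the
 * packet is accounted as dropped.
 */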
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}

static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
					       " duplicate EOQ: %p and %p\n",
					       priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */
	if (!restart &&
	    (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
	    == CPMAC_EOQ &&
	    (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
				       "restart rx from a descriptor that's "
				       "not free: %p\n",
				       priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);

	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */
	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
		       "Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
		       priv->dev->name,
		       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
		       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}
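/*
 * Transmit path: each of the CPMAC_QUEUES hardware channels owns exactly one
 * descriptor, so a subqueue is stopped as soon as a frame is queued on it
 * and woken again from cpmac_end_xmit() when the completion interrupt for
 * that channel arrives.
 */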
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	desc = &priv->desc_ring[queue];
	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
	if (likely(desc->skb)) {
		spin_lock(&priv->lock);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->skb->len;
		spin_unlock(&priv->lock);
		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
				 DMA_TO_DEVICE);

		if (unlikely(netif_msg_tx_done(priv)))
			printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
			       desc->skb, desc->skb->len);

		dev_kfree_skb_irq(desc->skb);
		desc->skb = NULL;
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	} else {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: end_xmit: spurious interrupt\n", dev->name);
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}
}

static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}
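/*
 * Bring the MAC up.  The station address is spread over three registers
 * (ADDR_LO holds the last octet and is replicated for every channel,
 * ADDR_MID the fifth octet, ADDR_HI the first four); only rx channel 0 is
 * armed with the rx ring, while all eight tx completion interrupts and both
 * MAC interrupt sources are enabled.
 */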
static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;

	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				printk(KERN_WARNING "%s: packet dropped\n",
				       dev->name);
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int i;

	if (unlikely(!priv->desc_ring))
		return;
	for (i = 0; i < CPMAC_QUEUES; i++) {
		priv->desc_ring[i].dataflags = 0;
		if (priv->desc_ring[i].skb) {
			dev_kfree_skb_any(priv->desc_ring[i].skb);
			priv->desc_ring[i].skb = NULL;
		}
	}
}

static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* Can't find any documentation on what these
			 * error codes actually are. So just log them and hope..
			 */
			if (rx_code)
				printk(KERN_WARNING "%s: host error %d on rx "
				       "channel %d (macstatus %08x), resetting\n",
				       dev->name, rx_code, rx_channel, macstatus);
			if (tx_code)
				printk(KERN_WARNING "%s: host error %d on tx "
				       "channel %d (macstatus %08x), resetting\n",
				       dev->name, tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}
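/*
 * The interrupt vector register encodes both the cause (MAC_INT_TX/RX/...)
 * and the channel number (tx channel in the low bits, rx channel in bits
 * 8-10).  Rx work is deferred to NAPI after writing RX_INT_CLEAR for the
 * channel (cpmac_poll() re-enables it when polling stops), and the EOI
 * vector is written so the controller can signal further interrupts.
 */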
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		printk(KERN_WARNING "%s: transmit timeout\n", dev->name);

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}

static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!(netif_running(dev)))
		return -EINVAL;
	if (!priv->phy)
		return -EINVAL;
	if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
	    (cmd == SIOCSMIIREG))
		return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);

	return -EOPNOTSUPP;
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (priv->phy)
		return phy_ethtool_gset(priv->phy, cmd);

	return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (priv->phy)
		return phy_ethtool_sset(priv->phy, cmd);

	return -EINVAL;
}

static void cpmac_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	ring->rx_max_pending = 1024;
	ring->rx_mini_max_pending = 1;
	ring->rx_jumbo_max_pending = 1;
	ring->tx_max_pending = 1;

	ring->rx_pending = priv->ring_size;
	ring->rx_mini_pending = 1;
	ring->rx_jumbo_pending = 1;
	ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev))
		return -EBUSY;
	priv->ring_size = ring->rx_pending;
	return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "cpmac");
	strcpy(info->version, CPMAC_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "%s", "cpmac");
	info->regdump_len = 0;
}

static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_settings = cpmac_get_settings,
	.set_settings = cpmac_set_settings,
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
};

static void cpmac_adjust_link(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int new_state = 0;

	spin_lock(&priv->lock);
	if (priv->phy->link) {
		netif_tx_start_all_queues(dev);
		if (priv->phy->duplex != priv->oldduplex) {
			new_state = 1;
			priv->oldduplex = priv->phy->duplex;
		}

		if (priv->phy->speed != priv->oldspeed) {
			new_state = 1;
			priv->oldspeed = priv->phy->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv) && net_ratelimit())
		phy_print_status(priv->phy);

	spin_unlock(&priv->lock);
}
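/*
 * The descriptor ring allocated below is shared: the first CPMAC_QUEUES
 * entries are the per-channel tx descriptors, the remaining ring_size
 * entries form the circular rx ring starting at priv->rx_head.
 */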
static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to request registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, mem->end - mem->start);
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to remap registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_remap;
	}

	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}

	priv->rx_head->prev->hw_next = (u32)0;

	if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
			       dev->name, dev))) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to obtain irq\n",
			       dev->name);
		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	priv->phy->state = PHY_CHANGELINK;
	phy_start(priv->phy);

	return 0;

fail_irq:
fail_desc:
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
fail_alloc:
	kfree(priv->desc_ring);
	iounmap(priv->regs);

fail_remap:
	release_mem_region(mem->start, mem->end - mem->start);

fail_reserve:
	return res;
}

static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(priv->phy);

	cpmac_hw_stop(dev);

	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, mem->end - mem->start);
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);
	return 0;
}
static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open = cpmac_open,
	.ndo_stop = cpmac_stop,
	.ndo_start_xmit = cpmac_start_xmit,
	.ndo_tx_timeout = cpmac_tx_timeout,
	.ndo_set_multicast_list = cpmac_set_multicast_list,
	.ndo_do_ioctl = cpmac_ioctl,
	.ndo_set_config = cpmac_config,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int external_switch;

static int __devinit cpmac_probe(struct platform_device *pdev)
{
	int rc, phy_id;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	struct resource *mem;
	struct cpmac_priv *priv;
	struct net_device *dev;
	struct plat_cpmac_data *pdata;

	pdata = pdev->dev.platform_data;

	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	} else {
		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
			if (!(pdata->phy_mask & (1 << phy_id)))
				continue;
			if (!cpmac_mii->phy_map[phy_id])
				continue;
			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
			break;
		}
	}

	if (phy_id == PHY_MAX_ADDR) {
		dev_err(&pdev->dev, "no PHY present\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
	if (!dev) {
		printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, dev);
	priv = netdev_priv(dev);

	priv->pdev = pdev;
	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!mem) {
		rc = -ENODEV;
		goto fail;
	}

	dev->irq = platform_get_irq_byname(pdev, "irq");

	dev->netdev_ops = &cpmac_netdev_ops;
	dev->ethtool_ops = &cpmac_ethtool_ops;

	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->rx_lock);
	priv->dev = dev;
	priv->ring_size = 64;
	priv->msg_enable = netif_msg_init(debug_level, 0xff);
	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

	snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);

	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);
	if (IS_ERR(priv->phy)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Could not attach to PHY\n",
			       dev->name);
		return PTR_ERR(priv->phy);
	}

	if ((rc = register_netdev(dev))) {
		printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
		       dev->name);
		goto fail;
	}

	if (netif_msg_probe(priv)) {
		printk(KERN_INFO
		       "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
		       "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
		       priv->phy_name, dev->dev_addr);
	}
	return 0;

fail:
	free_netdev(dev);
	return rc;
}

static int __devexit cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	return 0;
}

static struct platform_driver cpmac_driver = {
	.driver.name = "cpmac",
	.driver.owner = THIS_MODULE,
	.probe = cpmac_probe,
	.remove = __devexit_p(cpmac_remove),
};

int __devinit cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;
	cpmac_mii->irq = mii_irqs;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
	if (!cpmac_mii->priv) {
		printk(KERN_ERR "Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

#warning FIXME: unhardcode gpio&reset bits
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	for (i = 0; i < 300; i++)
		if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE)))
			break;
		else
			msleep(10);

	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}

void __devexit cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	mdiobus_free(cpmac_mii);
	iounmap(cpmac_mii->priv);
}

module_init(cpmac_init);
module_exit(cpmac_exit);