/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <linux/pxa168_eth.h>

#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers
 */
#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read */
#define SMI_OP_W		(0 << 26)	/* Write operation */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0

/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLP		(1 << 11)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)
  120. /*
  121. * Bit definitions of the Interrupt Cause Reg
  122. * and Interrupt MASK Reg is the same
  123. */
  124. #define ICR_RXBUF (1 << 0)
  125. #define ICR_TXBUF_H (1 << 2)
  126. #define ICR_TXBUF_L (1 << 3)
  127. #define ICR_TXEND_H (1 << 6)
  128. #define ICR_TXEND_L (1 << 7)
  129. #define ICR_RXERR (1 << 8)
  130. #define ICR_TXERR_H (1 << 10)
  131. #define ICR_TXERR_L (1 << 11)
  132. #define ICR_TX_UDR (1 << 13)
  133. #define ICR_MII_CH (1 << 28)
  134. #define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
  135. ICR_TXERR_H | ICR_TXERR_L |\
  136. ICR_TXEND_H | ICR_TXEND_L |\
  137. ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_ENABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_LINK		(1 << 0)
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
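
/*
 * SKB_DMA_REALIGN is the extra headroom needed so that skb->data can be
 * pushed back onto a cache-line boundary when NET_SKB_PAD itself is not
 * cache-line aligned; rxq_refill() reserves this amount on every freshly
 * allocated receive skb, and pxa168_eth_recalc_skb_size() accounts for it
 * when sizing the receive buffers.
 */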
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status */
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u16 buf_size;		/* Buffer size */
	u32 buf_ptr;		/* Descriptor buffer pointer */
	u32 next_desc_ptr;	/* Next descriptor pointer */
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};
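
/*
 * Both descriptor layouts above are consumed directly by the SDMA engine:
 * cmd_sts carries the BUF_OWNED_BY_DMA ownership handshake bit, and
 * next_desc_ptr chains the descriptors into the circular rings that
 * rxq_init() and txq_init() build below.
 */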
struct pxa168_eth_private {
	int port_num;		/* User Ethernet port number */
	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	struct net_device_stats stats;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;
	struct phy_device *phy;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
static int ethernet_phy_setup(struct net_device *dev);

static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}

static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		printk(KERN_ERR "%s : DMA Stuck\n", __func__);
}
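
/*
 * Worst-case abort_dma() timing, as coded above: each of the 40 outer
 * retries issues the abort command, busy-waits 100us, then polls up to
 * 10 more times at 10us intervals, i.e. roughly 200us per retry and
 * about 8ms total before the "DMA Stuck" error is logged.
 */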
static int ethernet_phy_get(struct pxa168_eth_private *pep)
{
	unsigned int reg_data;

	reg_data = rdl(pep, PHY_ADDRESS);

	return (reg_data >> (5 * pep->port_num)) & 0x1f;
}

static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * pep->port_num;

	reg_data = rdl(pep, PHY_ADDRESS);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(pep, PHY_ADDRESS, reg_data);
}

static void ethernet_phy_reset(struct pxa168_eth_private *pep)
{
	int data;

	data = phy_read(pep->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(pep->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(pep->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}
static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = dev_alloc_skb(pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb->end - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If the RX ring is empty of SKBs, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct pxa168_eth_private *pep = (void *)data;
	napi_schedule(&pep->napi);
}

static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}
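
/*
 * flip_8_bits() reverses the bit order within each nibble separately:
 * bits 3..0 of the input become bits 0..3 of the output, and bits 7..4
 * become bits 4..7. For example, flip_8_bits(0x12) == 0x84.
 */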
static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}
/*
 * ----------------------------------------------------------------------------
 * This function will calculate the hash function of the address.
 * Inputs
 * mac_addr_orig - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of MAC address since we are going to perform bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}
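
/*
 * The masking with 0x07ff above yields an 11-bit bucket index, which
 * matches the 1/2KB table geometry used throughout this driver: 2048
 * buckets of one 8-byte struct addr_table_entry each, i.e. the 16KB
 * HASH_ADDR_TABLE_SIZE (0x4000) allocated in init_hash_table().
 */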
/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep - ETHERNET port private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used in case of deleting an entry which is
 *	a part of a chain in the hash table. We can't just delete the entry
 *	since that would break the chain. We need to defragment the tables
 *	from time to time.
 * rd - 0 Discard packet upon match.
 *    - 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = (struct addr_table_entry *)(pep->htpr);
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
			     (new_low & 0xfffffff8)) &&
			    (le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			printk(KERN_INFO "%s: table section is full, need to "
					"move to 16kB implementation?\n",
					__FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}
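
/*
 * Collision handling above is plain linear probing: starting at the
 * hashed bucket, up to HOP_NUMBER (12) consecutive entries are
 * examined, wrapping from the last bucket (start + 0x7ff) back to the
 * first, before the table section is declared full.
 */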
/*
 * ----------------------------------------------------------------------------
 * Create an addressTable entry from MAC address info
 * found in the specified net_device struct
 *
 * Input : pointer to ethernet interface network device structure
 * Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);

	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by 32-bit pointer stored
	 * in HTPR internal register. Two possible sizes exist for the hash
	 * table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
	 * (16kB of DRAM required (4 x 4 kB banks)). We currently only
	 * support the 1/2kB size.
	 */
	/* TODO: Add support for 8kB hash table and alternative hash
	 * function. Driver can dynamically switch to them if the 1/2kB hash
	 * table is full.
	 */
	if (pep->htpr == NULL) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (pep->htpr == NULL)
			return -ENOMEM;
	}
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}
static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC addresses and add dev->addr
	 * and the multicast addresses.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}

static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];

	if (!is_valid_ether_addr(sa->sa_data))
		return -EINVAL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}
static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		pxa168_get_settings(pep->dev, &cmd);
		ethernet_phy_reset(pep);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}

static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}
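
/*
 * Note that eth_port_reset() reads SDMA_CMD and clears SDMA_CMD_ERD in
 * the local copy but never writes that value back; it is the
 * abort_dma() call that actually halts the receive and transmit DMA
 * engines before the port is disabled.
 */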
/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}

static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout desc_count %d\n",
	       dev->name, pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						      struct pxa168_eth_private,
						      tx_timeout_task);
	struct net_device *dev = pep->dev;
	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * If a packet was received without the first / last bits set,
		 * or with the error summary bit set, the packet needs to be
		 * dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {
			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
					       "%s: Rx pkt on multiple desc\n",
					       dev->name);
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
		dev->last_rx = jiffies;
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	if (icr & ICR_MII_CH) {
		pep->work_todo |= WORK_LINK;
		ret = 1;
	}
	return ret;
}

static void handle_link_event(struct pxa168_eth_private *pep)
{
	struct net_device *dev = pep->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(pep, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: link down\n", dev->name);
			netif_carrier_off(dev);
			txq_reclaim(dev, 1);
		}
		return;
	}
	if (port_status & PORT_SPEED_100)
		speed = 100;
	else
		speed = 10;

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
	       "flow control %sabled\n", dev->name,
	       speed, duplex ? "full" : "half", fc ? "en" : "dis");
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary. If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}
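
/*
 * Worked example for the sizing above, with the default MTU of 1500:
 * 1500 + 36 = 1536 bytes, already a multiple of 8, so pep->skb_size
 * becomes 1536 plus whatever SKB_DMA_REALIGN adds. set_port_config_ext()
 * then picks the frame-length limit from that padded value
 * (PCXR_MFL_1536 when SKB_DMA_REALIGN is zero, PCXR_MFL_2048 otherwise).
 */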
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep,
	    PORT_CONFIG_EXT, PCXR_2BSM |	/* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |			/* Enable DSCP in IP */
	    skb_size | PCXR_FLP |		/* do not force link pass */
	    PCXR_TX_HIGH_PRI);			/* Transmit - high priority queue */

	return 0;
}

static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
	set_port_config_ext(pep);

	return err;
}
static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
	if (!pep->rx_skb) {
		printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
		return -ENOMEM;
	}
	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->rx_desc_dma, GFP_KERNEL);
	if (!pep->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_rx_desc_area, 0, size);
	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}
	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	kfree(pep->rx_skb);
	return -ENOMEM;
}

static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		printk(KERN_ERR
		       "Error in freeing Rx Ring. %d skb's still allocated\n",
		       pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}
static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
	if (!pep->tx_skb) {
		printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
		return -ENOMEM;
	}
	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->tx_desc_dma, GFP_KERNEL);
	if (!pep->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}
	pep->tx_curr_desc_q = 0;
	pep->tx_used_desc_q = 0;
	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
	return 0;
out:
	kfree(pep->tx_skb);
	return -ENOMEM;
}

static void txq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	/* Free outstanding skb's on TX ring */
	txq_reclaim(dev, 1);
	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
	/* Free TX ring */
	if (pep->p_tx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
				  pep->p_tx_desc_area, pep->tx_desc_dma);
	kfree(pep->tx_skb);
}
static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, pxa168_eth_int_handler,
			  IRQF_DISABLED, dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skb's */
	rxq_refill(dev);
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	eth_port_start(dev);
	napi_enable(&pep->napi);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	int retval;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if ((mtu > 9500) || (mtu < 68))
		return -EINVAL;

	dev->mtu = mtu;
	retval = set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}
static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
	int tx_desc_curr;

	tx_desc_curr = pep->tx_curr_desc_q;
	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
	pep->tx_desc_count++;

	return tx_desc_curr;
}

static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	if (unlikely(pep->work_todo & WORK_LINK)) {
		pep->work_todo &= ~(WORK_LINK);
		handle_link_event(pep);
	}
	/*
	 * We call txq_reclaim every time, since within NAPI interrupts are
	 * disabled and we would otherwise miss a TX_DONE interrupt, which
	 * is then not reflected in the interrupt status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}
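
/*
 * NAPI flow, as coded here: pxa168_eth_int_handler() masks all
 * interrupts before scheduling this poll routine, and the mask is only
 * restored (via ALL_INTS) once a poll pass completes under budget, so
 * TX reclaim and RX processing run with the controller's interrupts off.
 */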
static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
	wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space. */
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
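
/*
 * Each transmitted skb above occupies exactly one descriptor, carrying
 * both TX_FIRST_DESC and TX_LAST_DESC; as coded, the driver maps only
 * skb->data with a single dma_map_single() call and does not do
 * scatter/gather, so fragmented skbs are never split across descriptors.
 */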
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}
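
/*
 * smi_wait_ready() polls SMI_BUSY at most PHY_WAIT_ITERATIONS (10)
 * times with msleep(10) between reads, so an unresponsive bus is given
 * on the order of 100ms (msleep may sleep longer) before -ETIMEDOUT
 * is returned.
 */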
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk(KERN_WARNING
			       "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	if (smi_wait_ready(pep)) {
		printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			       int cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	if (pep->phy != NULL)
		return phy_mii_ioctl(pep->phy, ifr, cmd);

	return -EOPNOTSUPP;
}

static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
{
	struct mii_bus *bus = pep->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
		/* Scan entire range */
		start = ethernet_phy_get(pep);
		num = 32;
	} else {
		/* Use phy addr specific to platform */
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				ethernet_phy_set_addr(pep, addr);
		}
	}

	return phydev;
}

static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
	struct phy_device *phy = pep->phy;
	ethernet_phy_reset(pep);

	phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->supported &= PHY_BASIC_FEATURES;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static int ethernet_phy_setup(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->pd->init)
		pep->pd->init();
	pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
	if (pep->phy != NULL)
		phy_init(pep, pep->pd->speed, pep->pd->duplex);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	return 0;
}
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = phy_read_status(pep->phy);
	if (err == 0)
		err = phy_ethtool_gset(pep->phy, cmd);

	return err;
}

static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	return phy_ethtool_sset(pep->phy, cmd);
}

static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRIVER_NAME, 32);
	strncpy(info->version, DRIVER_VERSION, 32);
	strncpy(info->fw_version, "N/A", 32);
	strncpy(info->bus_info, "N/A", 32);
}

static u32 pxa168_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings	= pxa168_get_settings,
	.set_settings	= pxa168_set_settings,
	.get_drvinfo	= pxa168_get_drvinfo,
	.get_link	= pxa168_get_link,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
};
static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = clk_get(&pdev->dev, "MFUCLK");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
		       DRIVER_NAME);
		return -ENODEV;
	}
	clk_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -ENODEV;
		goto err_netdev;
	}
	pep->base = ioremap(res->start, res->end - res->start + 1);
	if (pep->base == NULL) {
		err = -ENOMEM;
		goto err_netdev;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
	random_ether_addr(dev->dev_addr);

	pep->pd = pdev->dev.platform_data;
	pep->rx_ring_size = NUM_RX_DESCS;
	if (pep->pd->rx_queue_size)
		pep->rx_ring_size = pep->pd->rx_queue_size;

	pep->tx_ring_size = NUM_TX_DESCS;
	if (pep->pd->tx_queue_size)
		pep->tx_ring_size = pep->pd->tx_queue_size;

	pep->port_num = pep->pd->port_number;
	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	memset(&pep->timeout, 0, sizeof(struct timer_list));
	init_timer(&pep->timeout);
	pep->timeout.function = rxq_refill_timer_wrapper;
	pep->timeout.data = (unsigned long)pep;

	pep->smi_bus = mdiobus_alloc();
	if (pep->smi_bus == NULL) {
		err = -ENOMEM;
		goto err_base;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pxa168_init_hw(pep);
	err = ethernet_phy_setup(dev);
	if (err)
		goto err_mdiobus;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_base:
	iounmap(pep->base);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable(clk);
	clk_put(clk);
	return err;
}
static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (pep->clk) {
		clk_disable(pep->clk);
		clk_put(pep->clk);
		pep->clk = NULL;
	}
	if (pep->phy != NULL)
		phy_detach(pep->phy);

	iounmap(pep->base);
	pep->base = NULL;
	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	unregister_netdev(dev);
	flush_scheduled_work();
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	eth_port_reset(dev);
}

#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		   .name = DRIVER_NAME,
		   },
};

static int __init pxa168_init_module(void)
{
	return platform_driver_register(&pxa168_eth_driver);
}

static void __exit pxa168_cleanup_module(void)
{
	platform_driver_unregister(&pxa168_eth_driver);
}

module_init(pxa168_init_module);
module_exit(pxa168_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");