pxa168_eth.c

/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Zhangfei Gao <zgao6@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/pxa168_eth.h>

#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"
/*
 * Registers
 */
#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read */
#define SMI_OP_W		(0 << 26)	/* Write operation */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0

/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLP		(1 << 11)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)

/*
 * Bit definitions of the Interrupt Cause Reg
 * and Interrupt MASK Reg are the same
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
				ICR_TXERR_H | ICR_TXERR_L |\
				ICR_TXEND_H | ICR_TXEND_L |\
				ICR_RXBUF | ICR_RXERR | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_ENABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_LINK		(1 << 0)
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status */
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u16 buf_size;		/* Buffer size */
	u32 buf_ptr;		/* Descriptor buffer pointer */
	u32 next_desc_ptr;	/* Next descriptor pointer */
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};

struct pxa168_eth_private {
	int port_num;		/* User Ethernet port number */

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;
	struct phy_device *phy;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
static int ethernet_phy_setup(struct net_device *dev);

static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}
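
/*
 * Abort any in-flight receive and transmit DMA: assert the abort bits
 * in SDMA_CMD, then poll until the controller clears them, retrying
 * the abort up to 40 times before declaring the DMA engine stuck.
 */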
static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		printk(KERN_ERR "%s : DMA Stuck\n", __func__);
}
static int ethernet_phy_get(struct pxa168_eth_private *pep)
{
	unsigned int reg_data;

	reg_data = rdl(pep, PHY_ADDRESS);

	return (reg_data >> (5 * pep->port_num)) & 0x1f;
}

static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * pep->port_num;

	reg_data = rdl(pep, PHY_ADDRESS);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(pep, PHY_ADDRESS, reg_data);
}

static void ethernet_phy_reset(struct pxa168_eth_private *pep)
{
	int data;

	data = phy_read(pep->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(pep->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(pep->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}
static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = netdev_alloc_skb(dev, pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb->end - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If no skb could be allocated for the RX ring at all, set a
	 * timer to try allocating again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct pxa168_eth_private *pep = (void *)data;
	napi_schedule(&pep->napi);
}
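
/*
 * Reverse the bit order within each nibble of x (bit 0 <-> bit 3,
 * bit 1 <-> bit 2, likewise for the high nibble). Together with
 * nibble_swap_every_byte() below, this rearranges a MAC address into
 * the bit order consumed by the predefined hash function the hardware
 * expects (see hash_function() and init_hash_table()).
 */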
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}
static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}
/*
 * ----------------------------------------------------------------------------
 * This function will calculate the hash function of the address.
 * Inputs
 * mac_addr_orig - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of MAC address since we are going to perform bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}
/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep  - ethernet port private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used in case of deleting an entry which
 *	  is part of a chain in the hash table. We can't just delete the
 *	  entry since that would break the chain. We need to defragment
 *	  the tables from time to time.
 * rd   - 0 Discard packet upon match.
 *	- 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = pep->htpr;
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
			     (new_low & 0xfffffff8)) &&
			    (le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			printk(KERN_INFO "%s: table section is full, need to "
					 "move to 16kB implementation?\n",
			       __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}
/*
 * ----------------------------------------------------------------------------
 * Create an address table entry from MAC address info
 * found in the specified net_device struct
 *
 * Input : pointer to ethernet interface network device structure
 * Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);

	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}
static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by a 32-bit pointer
	 * stored in the HTPR internal register. Two possible sizes exist
	 * for the hash table: 8kB (256kB of DRAM required (4 x 64 kB banks))
	 * and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We currently
	 * only support the 1/2kB size.
	 */
	/* TODO: Add support for the 8kB hash table and an alternative hash
	 * function. The driver can dynamically switch to them if the 1/2kB
	 * hash table is full.
	 */
	if (pep->htpr == NULL) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (pep->htpr == NULL)
			return -ENOMEM;
	}
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}
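
/*
 * ndo_set_rx_mode callback: program the promiscuous bit from
 * IFF_PROMISC and rebuild the hash filter from scratch, first with
 * the device's own address and then with every address on the
 * current multicast list.
 */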
static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC addresses and add dev->dev_addr
	 * and the multicast addresses.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}
static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}
static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		pxa168_get_settings(pep->dev, &cmd);
		ethernet_phy_reset(pep);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}
static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}
/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}
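
/*
 * The watchdog fires when no transmit completes within
 * dev->watchdog_timeo (2 * HZ, set in probe). Recovery happens from
 * process context: the scheduled work item below simply closes and
 * reopens the interface, which resets the port and both rings.
 */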
static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout  desc_count %d\n",
	       dev->name, pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						      struct pxa168_eth_private,
						      tx_timeout_task);
	struct net_device *dev = pep->dev;

	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;

		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * In case we received a packet without the first / last bits
		 * on, or with the error summary bit set, the packet needs to
		 * be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
					       "%s: Rx pkt on multiple desc\n",
					       dev->name);
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}
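
/*
 * Read and acknowledge the pending interrupt causes. The cause bits
 * are acknowledged by writing their complement back to INT_CAUSE.
 * Returns non-zero when there is work for the NAPI poll loop, and
 * records link-change and TX-done events in pep->work_todo.
 */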
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	if (icr & ICR_MII_CH) {
		pep->work_todo |= WORK_LINK;
		ret = 1;
	}
	return ret;
}
static void handle_link_event(struct pxa168_eth_private *pep)
{
	struct net_device *dev = pep->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(pep, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: link down\n", dev->name);
			netif_carrier_off(dev);
			txq_reclaim(dev, 1);
		}
		return;
	}
	if (port_status & PORT_SPEED_100)
		speed = 100;
	else
		speed = 10;

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
	       "flow control %sabled\n", dev->name,
	       speed, duplex ? "full" : "half", fc ? "en" : "dis");
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}
static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary. If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep,
	    PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	 /* do not force link pass */
	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */

	return 0;
}
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
	set_port_config_ext(pep);

	return err;
}
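
/*
 * rxq_init()/txq_init() allocate the per-ring skb pointer arrays plus
 * a DMA-coherent descriptor area, and set each descriptor's
 * next_desc_ptr to the bus address of its successor so the ring forms
 * a closed circle that the SDMA engine can walk without CPU help.
 */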
static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
	if (!pep->rx_skb)
		return -ENOMEM;

	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->rx_desc_dma,
						 GFP_KERNEL);
	if (!pep->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_rx_desc_area, 0, size);
	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}
	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	kfree(pep->rx_skb);
	return -ENOMEM;
}
static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		printk(KERN_ERR
		       "Error in freeing Rx Ring. %d skb's still in use\n",
		       pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}
static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
	if (!pep->tx_skb)
		return -ENOMEM;

	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->tx_desc_dma,
						 GFP_KERNEL);
	if (!pep->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = pep->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}
	pep->tx_curr_desc_q = 0;
	pep->tx_used_desc_q = 0;
	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
	return 0;
out:
	kfree(pep->tx_skb);
	return -ENOMEM;
}

static void txq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	/* Free outstanding skb's on TX ring */
	txq_reclaim(dev, 1);
	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
	/* Free TX ring */
	if (pep->p_tx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
				  pep->p_tx_desc_area, pep->tx_desc_dma);
	kfree(pep->tx_skb);
}
static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, pxa168_eth_int_handler,
			  IRQF_DISABLED, dev->name, dev);
	if (err) {
		dev_err(&dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skb's */
	rxq_refill(dev);
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	eth_port_start(dev);
	napi_enable(&pep->napi);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}
static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	int retval;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if ((mtu > 9500) || (mtu < 68))
		return -EINVAL;

	dev->mtu = mtu;
	retval = set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_err(&dev->dev,
			"fatal error on re-opening device after MTU change\n");
	}

	return 0;
}
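
/*
 * Claim the next free tx descriptor and advance the current-descriptor
 * index. The BUG_ON cannot trigger in normal operation because
 * pxa168_eth_start_xmit() stops the queue while at least one
 * descriptor is still free.
 */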
static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
	int tx_desc_curr;

	tx_desc_curr = pep->tx_curr_desc_q;
	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;

	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);

	pep->tx_desc_count++;

	return tx_desc_curr;
}
static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	if (unlikely(pep->work_todo & WORK_LINK)) {
		pep->work_todo &= ~(WORK_LINK);
		handle_link_event(pep);
	}
	/*
	 * We call txq_reclaim on every poll, since interrupts are
	 * disabled while in NAPI and we would otherwise miss the
	 * TX_DONE interrupt, which is not updated in the interrupt
	 * status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}
static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

	wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += length;
	stats->tx_packets++;
	dev->trans_start = jiffies;
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space. */
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}
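
/*
 * SMI register layout, as used below: the PHY address sits in bits
 * 16-20, the register number in bits 21-25, the opcode in bit 26
 * (SMI_OP_R/SMI_OP_W), and the 16-bit data in the low half-word.
 * Reads must additionally wait for SMI_R_VALID before the data field
 * is meaningful.
 */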
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			printk(KERN_WARNING
			       "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	if (smi_wait_ready(pep)) {
		printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			       int cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->phy != NULL)
		return phy_mii_ioctl(pep->phy, ifr, cmd);

	return -EOPNOTSUPP;
}
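
/*
 * Locate the PHY for this port. If the platform passed the default
 * address (0), every address on the MDIO bus is probed, starting from
 * whatever the PHY_ADDRESS register currently holds; otherwise only
 * the single platform-supplied address is scanned. The first PHY
 * found is also programmed back into the PHY_ADDRESS register.
 */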
static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
{
	struct mii_bus *bus = pep->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
		/* Scan entire range */
		start = ethernet_phy_get(pep);
		num = 32;
	} else {
		/* Use phy addr specific to platform */
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				ethernet_phy_set_addr(pep, addr);
		}
	}

	return phydev;
}
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
	struct phy_device *phy = pep->phy;

	ethernet_phy_reset(pep);

	phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->supported &= PHY_BASIC_FEATURES;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static int ethernet_phy_setup(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->pd->init)
		pep->pd->init();
	pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
	if (pep->phy != NULL)
		phy_init(pep, pep->pd->speed, pep->pd->duplex);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	return 0;
}
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = phy_read_status(pep->phy);
	if (err == 0)
		err = phy_ethtool_gset(pep->phy, cmd);

	return err;
}

static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	return phy_ethtool_sset(pep->phy, cmd);
}

static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings	= pxa168_get_settings,
	.set_settings	= pxa168_set_settings,
	.get_drvinfo	= pxa168_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};
static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
};
static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = clk_get(&pdev->dev, "MFUCLK");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
		       DRIVER_NAME);
		return -ENODEV;
	}
	clk_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -ENODEV;
		goto err_netdev;
	}
	pep->base = ioremap(res->start, resource_size(res));
	if (pep->base == NULL) {
		err = -ENOMEM;
		goto err_netdev;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
	eth_hw_addr_random(dev);

	pep->pd = pdev->dev.platform_data;
	pep->rx_ring_size = NUM_RX_DESCS;
	if (pep->pd->rx_queue_size)
		pep->rx_ring_size = pep->pd->rx_queue_size;

	pep->tx_ring_size = NUM_TX_DESCS;
	if (pep->pd->tx_queue_size)
		pep->tx_ring_size = pep->pd->tx_queue_size;

	pep->port_num = pep->pd->port_number;
	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	memset(&pep->timeout, 0, sizeof(struct timer_list));
	init_timer(&pep->timeout);
	pep->timeout.function = rxq_refill_timer_wrapper;
	pep->timeout.data = (unsigned long)pep;

	pep->smi_bus = mdiobus_alloc();
	if (pep->smi_bus == NULL) {
		err = -ENOMEM;
		goto err_base;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 pdev->name, pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pxa168_init_hw(pep);
	err = ethernet_phy_setup(dev);
	if (err)
		goto err_mdiobus;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_base:
	iounmap(pep->base);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable(clk);
	clk_put(clk);
	return err;
}
static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (pep->clk) {
		clk_disable(pep->clk);
		clk_put(pep->clk);
		pep->clk = NULL;
	}
	if (pep->phy != NULL)
		phy_detach(pep->phy);

	iounmap(pep->base);
	pep->base = NULL;
	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	unregister_netdev(dev);
	cancel_work_sync(&pep->tx_timeout_task);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	eth_port_reset(dev);
}
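
/*
 * Power management is not implemented: with CONFIG_PM enabled the
 * suspend/resume callbacks exist but only report -ENOSYS, and without
 * it they are left as NULL so the platform core skips them entirely.
 */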
#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume	NULL
#define pxa168_eth_suspend	NULL
#endif
static struct platform_driver pxa168_eth_driver = {
	.probe		= pxa168_eth_probe,
	.remove		= pxa168_eth_remove,
	.shutdown	= pxa168_eth_shutdown,
	.resume		= pxa168_eth_resume,
	.suspend	= pxa168_eth_suspend,
	.driver = {
		.name	= DRIVER_NAME,
	},
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");