sh_eth.c

  1. /*
  2. * SuperH Ethernet device driver
  3. *
  4. * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
  5. * Copyright (C) 2008-2009 Renesas Solutions Corp.
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms and conditions of the GNU General Public License,
  9. * version 2, as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope it will be useful, but WITHOUT
  12. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  14. * more details.
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc.,
  17. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18. *
  19. * The full GNU General Public License is included in this distribution in
  20. * the file called "COPYING".
  21. */
  22. #include <linux/init.h>
  23. #include <linux/module.h>
  24. #include <linux/kernel.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/etherdevice.h>
  29. #include <linux/delay.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/mdio-bitbang.h>
  32. #include <linux/netdevice.h>
  33. #include <linux/phy.h>
  34. #include <linux/cache.h>
  35. #include <linux/io.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/pm_runtime.h>
  38. #include <linux/slab.h>
  39. #include <linux/ethtool.h>
  40. #include <linux/if_vlan.h>
  41. #include <linux/sh_eth.h>
  42. #include "sh_eth.h"
  43. #define SH_ETH_DEF_MSG_ENABLE \
  44. (NETIF_MSG_LINK | \
  45. NETIF_MSG_TIMER | \
  46. NETIF_MSG_RX_ERR| \
  47. NETIF_MSG_TX_ERR)
  48. /* CPU-dependent code follows */
  49. #if defined(CONFIG_CPU_SUBTYPE_SH7724)
  50. #define SH_ETH_RESET_DEFAULT 1
  51. static void sh_eth_set_duplex(struct net_device *ndev)
  52. {
  53. struct sh_eth_private *mdp = netdev_priv(ndev);
  54. if (mdp->duplex) /* Full */
  55. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
  56. else /* Half */
  57. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
  58. }
  59. static void sh_eth_set_rate(struct net_device *ndev)
  60. {
  61. struct sh_eth_private *mdp = netdev_priv(ndev);
  62. switch (mdp->speed) {
  63. case 10: /* 10BASE */
  64. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
  65. break;
  66. case 100:/* 100BASE */
  67. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
  68. break;
  69. default:
  70. break;
  71. }
  72. }
  73. /* SH7724 */
  74. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  75. .set_duplex = sh_eth_set_duplex,
  76. .set_rate = sh_eth_set_rate,
  77. .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
  78. .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
  79. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
  80. .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
  81. .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
  82. EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
  83. .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
  84. .apr = 1,
  85. .mpr = 1,
  86. .tpauser = 1,
  87. .hw_swap = 1,
  88. .rpadir = 1,
  89. .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
  90. };
  91. #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
  92. #define SH_ETH_HAS_BOTH_MODULES 1
  93. #define SH_ETH_HAS_TSU 1
  94. static void sh_eth_set_duplex(struct net_device *ndev)
  95. {
  96. struct sh_eth_private *mdp = netdev_priv(ndev);
  97. if (mdp->duplex) /* Full */
  98. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
  99. else /* Half */
  100. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
  101. }
  102. static void sh_eth_set_rate(struct net_device *ndev)
  103. {
  104. struct sh_eth_private *mdp = netdev_priv(ndev);
  105. switch (mdp->speed) {
  106. case 10: /* 10BASE */
  107. sh_eth_write(ndev, 0, RTRATE);
  108. break;
  109. case 100:/* 100BASE */
  110. sh_eth_write(ndev, 1, RTRATE);
  111. break;
  112. default:
  113. break;
  114. }
  115. }
  116. /* SH7757 */
  117. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  118. .set_duplex = sh_eth_set_duplex,
  119. .set_rate = sh_eth_set_rate,
  120. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  121. .rmcr_value = 0x00000001,
  122. .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
  123. .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
  124. EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
  125. .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
  126. .apr = 1,
  127. .mpr = 1,
  128. .tpauser = 1,
  129. .hw_swap = 1,
  130. .no_ade = 1,
  131. .rpadir = 1,
  132. .rpadir_value = 2 << 16,
  133. };
  134. #define SH_GIGA_ETH_BASE 0xfee00000
  135. #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
  136. #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
  137. static void sh_eth_chip_reset_giga(struct net_device *ndev)
  138. {
  139. int i;
  140. unsigned long mahr[2], malr[2];
  141. /* save MAHR and MALR */
  142. for (i = 0; i < 2; i++) {
  143. malr[i] = ioread32((void *)GIGA_MALR(i));
  144. mahr[i] = ioread32((void *)GIGA_MAHR(i));
  145. }
  146. /* reset device */
  147. iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
  148. mdelay(1);
  149. /* restore MAHR and MALR */
  150. for (i = 0; i < 2; i++) {
  151. iowrite32(malr[i], (void *)GIGA_MALR(i));
  152. iowrite32(mahr[i], (void *)GIGA_MAHR(i));
  153. }
  154. }
  155. static int sh_eth_is_gether(struct sh_eth_private *mdp);
  156. static void sh_eth_reset(struct net_device *ndev)
  157. {
  158. struct sh_eth_private *mdp = netdev_priv(ndev);
  159. int cnt = 100;
  160. if (sh_eth_is_gether(mdp)) {
  161. sh_eth_write(ndev, 0x03, EDSR);
  162. sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
  163. EDMR);
  164. while (cnt > 0) {
  165. if (!(sh_eth_read(ndev, EDMR) & 0x3))
  166. break;
  167. mdelay(1);
  168. cnt--;
  169. }
  170. if (cnt == 0)
  171. printk(KERN_ERR "Device reset failed\n");
  172. /* Table Init */
  173. sh_eth_write(ndev, 0x0, TDLAR);
  174. sh_eth_write(ndev, 0x0, TDFAR);
  175. sh_eth_write(ndev, 0x0, TDFXR);
  176. sh_eth_write(ndev, 0x0, TDFFR);
  177. sh_eth_write(ndev, 0x0, RDLAR);
  178. sh_eth_write(ndev, 0x0, RDFAR);
  179. sh_eth_write(ndev, 0x0, RDFXR);
  180. sh_eth_write(ndev, 0x0, RDFFR);
  181. } else {
  182. sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
  183. EDMR);
  184. mdelay(3);
  185. sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
  186. EDMR);
  187. }
  188. }
  189. static void sh_eth_set_duplex_giga(struct net_device *ndev)
  190. {
  191. struct sh_eth_private *mdp = netdev_priv(ndev);
  192. if (mdp->duplex) /* Full */
  193. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
  194. else /* Half */
  195. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
  196. }
  197. static void sh_eth_set_rate_giga(struct net_device *ndev)
  198. {
  199. struct sh_eth_private *mdp = netdev_priv(ndev);
  200. switch (mdp->speed) {
  201. case 10: /* 10BASE */
  202. sh_eth_write(ndev, 0x00000000, GECMR);
  203. break;
  204. case 100:/* 100BASE */
  205. sh_eth_write(ndev, 0x00000010, GECMR);
  206. break;
  207. case 1000: /* 1000BASE */
  208. sh_eth_write(ndev, 0x00000020, GECMR);
  209. break;
  210. default:
  211. break;
  212. }
  213. }
  214. /* SH7757(GETHERC) */
  215. static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
  216. .chip_reset = sh_eth_chip_reset_giga,
  217. .set_duplex = sh_eth_set_duplex_giga,
  218. .set_rate = sh_eth_set_rate_giga,
  219. .ecsr_value = ECSR_ICD | ECSR_MPD,
  220. .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
  221. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  222. .tx_check = EESR_TC1 | EESR_FTC,
  223. .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
  224. EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
  225. EESR_ECI,
  226. .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
  227. EESR_TFE,
  228. .fdr_value = 0x0000072f,
  229. .rmcr_value = 0x00000001,
  230. .apr = 1,
  231. .mpr = 1,
  232. .tpauser = 1,
  233. .bculr = 1,
  234. .hw_swap = 1,
  235. .rpadir = 1,
  236. .rpadir_value = 2 << 16,
  237. .no_trimd = 1,
  238. .no_ade = 1,
  239. };
  240. static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
  241. {
  242. if (sh_eth_is_gether(mdp))
  243. return &sh_eth_my_cpu_data_giga;
  244. else
  245. return &sh_eth_my_cpu_data;
  246. }
  247. #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
  248. #define SH_ETH_HAS_TSU 1
  249. static void sh_eth_chip_reset(struct net_device *ndev)
  250. {
  251. struct sh_eth_private *mdp = netdev_priv(ndev);
  252. /* reset device */
  253. sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
  254. mdelay(1);
  255. }
  256. static void sh_eth_reset(struct net_device *ndev)
  257. {
  258. int cnt = 100;
  259. sh_eth_write(ndev, EDSR_ENALL, EDSR);
  260. sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
  261. while (cnt > 0) {
  262. if (!(sh_eth_read(ndev, EDMR) & 0x3))
  263. break;
  264. mdelay(1);
  265. cnt--;
  266. }
  267. if (cnt == 0)
  268. printk(KERN_ERR "Device reset failed\n");
  269. /* Table Init */
  270. sh_eth_write(ndev, 0x0, TDLAR);
  271. sh_eth_write(ndev, 0x0, TDFAR);
  272. sh_eth_write(ndev, 0x0, TDFXR);
  273. sh_eth_write(ndev, 0x0, TDFFR);
  274. sh_eth_write(ndev, 0x0, RDLAR);
  275. sh_eth_write(ndev, 0x0, RDFAR);
  276. sh_eth_write(ndev, 0x0, RDFXR);
  277. sh_eth_write(ndev, 0x0, RDFFR);
  278. }
  279. static void sh_eth_set_duplex(struct net_device *ndev)
  280. {
  281. struct sh_eth_private *mdp = netdev_priv(ndev);
  282. if (mdp->duplex) /* Full */
  283. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
  284. else /* Half */
  285. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
  286. }
  287. static void sh_eth_set_rate(struct net_device *ndev)
  288. {
  289. struct sh_eth_private *mdp = netdev_priv(ndev);
  290. switch (mdp->speed) {
  291. case 10: /* 10BASE */
  292. sh_eth_write(ndev, GECMR_10, GECMR);
  293. break;
  294. case 100:/* 100BASE */
  295. sh_eth_write(ndev, GECMR_100, GECMR);
  296. break;
  297. case 1000: /* 1000BASE */
  298. sh_eth_write(ndev, GECMR_1000, GECMR);
  299. break;
  300. default:
  301. break;
  302. }
  303. }
  304. /* sh7763 */
  305. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  306. .chip_reset = sh_eth_chip_reset,
  307. .set_duplex = sh_eth_set_duplex,
  308. .set_rate = sh_eth_set_rate,
  309. .ecsr_value = ECSR_ICD | ECSR_MPD,
  310. .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
  311. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  312. .tx_check = EESR_TC1 | EESR_FTC,
  313. .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
  314. EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
  315. EESR_ECI,
  316. .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
  317. EESR_TFE,
  318. .apr = 1,
  319. .mpr = 1,
  320. .tpauser = 1,
  321. .bculr = 1,
  322. .hw_swap = 1,
  323. .no_trimd = 1,
  324. .no_ade = 1,
  325. .tsu = 1,
  326. };
  327. #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
  328. #define SH_ETH_RESET_DEFAULT 1
  329. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  330. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  331. .apr = 1,
  332. .mpr = 1,
  333. .tpauser = 1,
  334. .hw_swap = 1,
  335. };
  336. #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
  337. #define SH_ETH_RESET_DEFAULT 1
  338. #define SH_ETH_HAS_TSU 1
  339. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  340. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  341. .tsu = 1,
  342. };
  343. #endif
  344. static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
  345. {
  346. if (!cd->ecsr_value)
  347. cd->ecsr_value = DEFAULT_ECSR_INIT;
  348. if (!cd->ecsipr_value)
  349. cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
  350. if (!cd->fcftr_value)
  351. cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
  352. DEFAULT_FIFO_F_D_RFD;
  353. if (!cd->fdr_value)
  354. cd->fdr_value = DEFAULT_FDR_INIT;
  355. if (!cd->rmcr_value)
  356. cd->rmcr_value = DEFAULT_RMCR_VALUE;
  357. if (!cd->tx_check)
  358. cd->tx_check = DEFAULT_TX_CHECK;
  359. if (!cd->eesr_err_check)
  360. cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
  361. if (!cd->tx_error_check)
  362. cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
  363. }
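/*
 * Any per-SoC field left zero in sh_eth_cpu_data is filled in with a
 * conservative default here, so the CPU-specific tables above only need
 * to list the values that actually differ from the defaults.
 */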
  364. #if defined(SH_ETH_RESET_DEFAULT)
  365. /* Chip Reset */
  366. static void sh_eth_reset(struct net_device *ndev)
  367. {
  368. sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
  369. mdelay(3);
  370. sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
  371. }
  372. #endif
  373. #if defined(CONFIG_CPU_SH4)
  374. static void sh_eth_set_receive_align(struct sk_buff *skb)
  375. {
  376. int reserve;
  377. reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
  378. if (reserve)
  379. skb_reserve(skb, reserve);
  380. }
  381. #else
  382. static void sh_eth_set_receive_align(struct sk_buff *skb)
  383. {
  384. skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
  385. }
  386. #endif
  387. /* CPU <-> EDMAC endian convert */
  388. static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
  389. {
  390. switch (mdp->edmac_endian) {
  391. case EDMAC_LITTLE_ENDIAN:
  392. return cpu_to_le32(x);
  393. case EDMAC_BIG_ENDIAN:
  394. return cpu_to_be32(x);
  395. }
  396. return x;
  397. }
  398. static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
  399. {
  400. switch (mdp->edmac_endian) {
  401. case EDMAC_LITTLE_ENDIAN:
  402. return le32_to_cpu(x);
  403. case EDMAC_BIG_ENDIAN:
  404. return be32_to_cpu(x);
  405. }
  406. return x;
  407. }
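/*
 * Note: descriptor fields live in DMA-coherent memory shared with the
 * EDMAC.  The EDMAC's descriptor byte order is a property of the SoC
 * (mdp->edmac_endian, taken from platform data), so it is converted
 * explicitly here instead of relying on the CPU's endianness.
 */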
  408. /*
  409. * Program the hardware MAC address from dev->dev_addr.
  410. */
  411. static void update_mac_address(struct net_device *ndev)
  412. {
  413. sh_eth_write(ndev,
  414. (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
  415. (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
  416. sh_eth_write(ndev,
  417. (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
  418. }
  419. /*
  420. * Get MAC address from SuperH MAC address register
  421. *
  422. * The SuperH Ethernet controller has no ROM for the MAC address.
  423. * This driver uses the MAC address set by the bootloader (U-Boot or sh-ipl+g).
  424. * To use this device, the MAC address must be set in the bootloader.
  425. *
  426. */
  427. static void read_mac_address(struct net_device *ndev, unsigned char *mac)
  428. {
  429. if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
  430. memcpy(ndev->dev_addr, mac, 6);
  431. } else {
  432. ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
  433. ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
  434. ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
  435. ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
  436. ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
  437. ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
  438. }
  439. }
  440. static int sh_eth_is_gether(struct sh_eth_private *mdp)
  441. {
  442. if (mdp->reg_offset == sh_eth_offset_gigabit)
  443. return 1;
  444. else
  445. return 0;
  446. }
  447. static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
  448. {
  449. if (sh_eth_is_gether(mdp))
  450. return EDTRR_TRNS_GETHER;
  451. else
  452. return EDTRR_TRNS_ETHER;
  453. }
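/*
 * Bit-banged MDIO state.  'addr' points at the PIR register and the
 * *_msk fields select its MDC (clock), MDO (output data), MDI (input
 * data) and MMD (pin direction) bits; set_gate, if provided by platform
 * data, is called before each pin access (see sh_mdio_init()).
 */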
  454. struct bb_info {
  455. void (*set_gate)(void *addr);
  456. struct mdiobb_ctrl ctrl;
  457. void *addr;
  458. u32 mmd_msk;/* MMD */
  459. u32 mdo_msk;
  460. u32 mdi_msk;
  461. u32 mdc_msk;
  462. };
  463. /* PHY bit set */
  464. static void bb_set(void *addr, u32 msk)
  465. {
  466. iowrite32(ioread32(addr) | msk, addr);
  467. }
  468. /* PHY bit clear */
  469. static void bb_clr(void *addr, u32 msk)
  470. {
  471. iowrite32((ioread32(addr) & ~msk), addr);
  472. }
  473. /* PHY bit read */
  474. static int bb_read(void *addr, u32 msk)
  475. {
  476. return (ioread32(addr) & msk) != 0;
  477. }
  478. /* Data I/O pin control */
  479. static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
  480. {
  481. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  482. if (bitbang->set_gate)
  483. bitbang->set_gate(bitbang->addr);
  484. if (bit)
  485. bb_set(bitbang->addr, bitbang->mmd_msk);
  486. else
  487. bb_clr(bitbang->addr, bitbang->mmd_msk);
  488. }
  489. /* Set bit data */
  490. static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
  491. {
  492. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  493. if (bitbang->set_gate)
  494. bitbang->set_gate(bitbang->addr);
  495. if (bit)
  496. bb_set(bitbang->addr, bitbang->mdo_msk);
  497. else
  498. bb_clr(bitbang->addr, bitbang->mdo_msk);
  499. }
  500. /* Get bit data */
  501. static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
  502. {
  503. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  504. if (bitbang->set_gate)
  505. bitbang->set_gate(bitbang->addr);
  506. return bb_read(bitbang->addr, bitbang->mdi_msk);
  507. }
  508. /* MDC pin control */
  509. static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
  510. {
  511. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  512. if (bitbang->set_gate)
  513. bitbang->set_gate(bitbang->addr);
  514. if (bit)
  515. bb_set(bitbang->addr, bitbang->mdc_msk);
  516. else
  517. bb_clr(bitbang->addr, bitbang->mdc_msk);
  518. }
  519. /* mdio bus control struct */
  520. static struct mdiobb_ops bb_ops = {
  521. .owner = THIS_MODULE,
  522. .set_mdc = sh_mdc_ctrl,
  523. .set_mdio_dir = sh_mmd_ctrl,
  524. .set_mdio_data = sh_set_mdio,
  525. .get_mdio_data = sh_get_mdio,
  526. };
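/*
 * The ops above only toggle the PIR pins; MDIO frame timing and the
 * read/write protocol are handled by the generic mdio-bitbang library
 * (alloc_mdio_bitbang() below wraps them into a struct mii_bus).
 */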
  527. /* free skb and descriptor buffer */
  528. static void sh_eth_ring_free(struct net_device *ndev)
  529. {
  530. struct sh_eth_private *mdp = netdev_priv(ndev);
  531. int i;
  532. /* Free Rx skb ringbuffer */
  533. if (mdp->rx_skbuff) {
  534. for (i = 0; i < RX_RING_SIZE; i++) {
  535. if (mdp->rx_skbuff[i])
  536. dev_kfree_skb(mdp->rx_skbuff[i]);
  537. }
  538. }
  539. kfree(mdp->rx_skbuff);
  540. /* Free Tx skb ringbuffer */
  541. if (mdp->tx_skbuff) {
  542. for (i = 0; i < TX_RING_SIZE; i++) {
  543. if (mdp->tx_skbuff[i])
  544. dev_kfree_skb(mdp->tx_skbuff[i]);
  545. }
  546. }
  547. kfree(mdp->tx_skbuff);
  548. }
  549. /* format skb and descriptor buffer */
  550. static void sh_eth_ring_format(struct net_device *ndev)
  551. {
  552. struct sh_eth_private *mdp = netdev_priv(ndev);
  553. int i;
  554. struct sk_buff *skb;
  555. struct sh_eth_rxdesc *rxdesc = NULL;
  556. struct sh_eth_txdesc *txdesc = NULL;
  557. int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
  558. int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
  559. mdp->cur_rx = mdp->cur_tx = 0;
  560. mdp->dirty_rx = mdp->dirty_tx = 0;
  561. memset(mdp->rx_ring, 0, rx_ringsize);
  562. /* build Rx ring buffer */
  563. for (i = 0; i < RX_RING_SIZE; i++) {
  564. /* skb */
  565. mdp->rx_skbuff[i] = NULL;
  566. skb = dev_alloc_skb(mdp->rx_buf_sz);
  567. mdp->rx_skbuff[i] = skb;
  568. if (skb == NULL)
  569. break;
  570. dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
  571. DMA_FROM_DEVICE);
  572. skb->dev = ndev; /* Mark as being used by this device. */
  573. sh_eth_set_receive_align(skb);
  574. /* RX descriptor */
  575. rxdesc = &mdp->rx_ring[i];
  576. rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
  577. rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
  578. /* The buffer length is rounded up to a 16-byte boundary. */
  579. rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
  580. /* Rx descriptor address set */
  581. if (i == 0) {
  582. sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
  583. if (sh_eth_is_gether(mdp))
  584. sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
  585. }
  586. }
  587. mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
  588. /* Mark the last entry as wrapping the ring. */
  589. rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
  590. memset(mdp->tx_ring, 0, tx_ringsize);
  591. /* build Tx ring buffer */
  592. for (i = 0; i < TX_RING_SIZE; i++) {
  593. mdp->tx_skbuff[i] = NULL;
  594. txdesc = &mdp->tx_ring[i];
  595. txdesc->status = cpu_to_edmac(mdp, TD_TFP);
  596. txdesc->buffer_length = 0;
  597. if (i == 0) {
  598. /* Tx descriptor address set */
  599. sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
  600. if (sh_eth_is_gether(mdp))
  601. sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
  602. }
  603. }
  604. txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
  605. }
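/*
 * After formatting, every Rx descriptor is owned by the EDMAC (RD_RACT
 * set) and every Tx descriptor by the CPU; the last descriptor of each
 * ring carries the RD_RDEL/TD_TDLE "end of list" bit so the controller
 * wraps back to the base written to RDLAR/TDLAR (and RDFAR/TDFAR on
 * GETHER parts).
 */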
  606. /* Get skb and descriptor buffer */
  607. static int sh_eth_ring_init(struct net_device *ndev)
  608. {
  609. struct sh_eth_private *mdp = netdev_priv(ndev);
  610. int rx_ringsize, tx_ringsize, ret = 0;
  611. /*
  612. * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
  613. * card needs room to do 8 byte alignment, +2 so we can reserve
  614. * the first 2 bytes, and +16 gets room for the status word from the
  615. * card.
  616. */
  617. mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
  618. (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
  619. if (mdp->cd->rpadir)
  620. mdp->rx_buf_sz += NET_IP_ALIGN;
  621. /* Allocate RX and TX skb rings */
  622. mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
  623. GFP_KERNEL);
  624. if (!mdp->rx_skbuff) {
  625. dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
  626. ret = -ENOMEM;
  627. return ret;
  628. }
  629. mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
  630. GFP_KERNEL);
  631. if (!mdp->tx_skbuff) {
  632. dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
  633. ret = -ENOMEM;
  634. goto skb_ring_free;
  635. }
  636. /* Allocate all Rx descriptors. */
  637. rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
  638. mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
  639. GFP_KERNEL);
  640. if (!mdp->rx_ring) {
  641. dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
  642. rx_ringsize);
  643. ret = -ENOMEM;
  644. goto desc_ring_free;
  645. }
  646. mdp->dirty_rx = 0;
  647. /* Allocate all Tx descriptors. */
  648. tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
  649. mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
  650. GFP_KERNEL);
  651. if (!mdp->tx_ring) {
  652. dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
  653. tx_ringsize);
  654. ret = -ENOMEM;
  655. goto desc_ring_free;
  656. }
  657. return ret;
  658. desc_ring_free:
  659. /* free DMA buffer */
  660. dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
  661. skb_ring_free:
  662. /* Free Rx and Tx skb ring buffer */
  663. sh_eth_ring_free(ndev);
  664. return ret;
  665. }
  666. static int sh_eth_dev_init(struct net_device *ndev)
  667. {
  668. int ret = 0;
  669. struct sh_eth_private *mdp = netdev_priv(ndev);
  670. u_int32_t rx_int_var, tx_int_var;
  671. u32 val;
  672. /* Soft Reset */
  673. sh_eth_reset(ndev);
  674. /* Descriptor format */
  675. sh_eth_ring_format(ndev);
  676. if (mdp->cd->rpadir)
  677. sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
  678. /* mask all EDMAC interrupts */
  679. sh_eth_write(ndev, 0, EESIPR);
  680. #if defined(__LITTLE_ENDIAN__)
  681. if (mdp->cd->hw_swap)
  682. sh_eth_write(ndev, EDMR_EL, EDMR);
  683. else
  684. #endif
  685. sh_eth_write(ndev, 0, EDMR);
  686. /* FIFO size set */
  687. sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
  688. sh_eth_write(ndev, 0, TFTR);
  689. /* Frame recv control */
  690. sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
  691. rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
  692. tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
  693. sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
  694. if (mdp->cd->bculr)
  695. sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
  696. sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
  697. if (!mdp->cd->no_trimd)
  698. sh_eth_write(ndev, 0, TRIMD);
  699. /* Recv frame limit set register */
  700. sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
  701. RFLR);
  702. sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
  703. sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
  704. /* PAUSE Prohibition */
  705. val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
  706. ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
  707. sh_eth_write(ndev, val, ECMR);
  708. if (mdp->cd->set_rate)
  709. mdp->cd->set_rate(ndev);
  710. /* E-MAC Status Register clear */
  711. sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
  712. /* E-MAC Interrupt Enable register */
  713. sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
  714. /* Set MAC address */
  715. update_mac_address(ndev);
  716. /* mask reset */
  717. if (mdp->cd->apr)
  718. sh_eth_write(ndev, APR_AP, APR);
  719. if (mdp->cd->mpr)
  720. sh_eth_write(ndev, MPR_MP, MPR);
  721. if (mdp->cd->tpauser)
  722. sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
  723. /* Setting the Rx mode will start the Rx process. */
  724. sh_eth_write(ndev, EDRRR_R, EDRRR);
  725. netif_start_queue(ndev);
  726. return ret;
  727. }
  728. /* free Tx skb function */
  729. static int sh_eth_txfree(struct net_device *ndev)
  730. {
  731. struct sh_eth_private *mdp = netdev_priv(ndev);
  732. struct sh_eth_txdesc *txdesc;
  733. int freeNum = 0;
  734. int entry = 0;
  735. for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
  736. entry = mdp->dirty_tx % TX_RING_SIZE;
  737. txdesc = &mdp->tx_ring[entry];
  738. if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
  739. break;
  740. /* Free the original skb. */
  741. if (mdp->tx_skbuff[entry]) {
  742. dma_unmap_single(&ndev->dev, txdesc->addr,
  743. txdesc->buffer_length, DMA_TO_DEVICE);
  744. dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
  745. mdp->tx_skbuff[entry] = NULL;
  746. freeNum++;
  747. }
  748. txdesc->status = cpu_to_edmac(mdp, TD_TFP);
  749. if (entry >= TX_RING_SIZE - 1)
  750. txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
  751. mdp->stats.tx_packets++;
  752. mdp->stats.tx_bytes += txdesc->buffer_length;
  753. }
  754. return freeNum;
  755. }
  756. /* Packet receive function */
  757. static int sh_eth_rx(struct net_device *ndev)
  758. {
  759. struct sh_eth_private *mdp = netdev_priv(ndev);
  760. struct sh_eth_rxdesc *rxdesc;
  761. int entry = mdp->cur_rx % RX_RING_SIZE;
  762. int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
  763. struct sk_buff *skb;
  764. u16 pkt_len = 0;
  765. u32 desc_status;
  766. rxdesc = &mdp->rx_ring[entry];
  767. while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
  768. desc_status = edmac_to_cpu(mdp, rxdesc->status);
  769. pkt_len = rxdesc->frame_length;
  770. if (--boguscnt < 0)
  771. break;
  772. if (!(desc_status & RDFEND))
  773. mdp->stats.rx_length_errors++;
  774. if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
  775. RD_RFS5 | RD_RFS6 | RD_RFS10)) {
  776. mdp->stats.rx_errors++;
  777. if (desc_status & RD_RFS1)
  778. mdp->stats.rx_crc_errors++;
  779. if (desc_status & RD_RFS2)
  780. mdp->stats.rx_frame_errors++;
  781. if (desc_status & RD_RFS3)
  782. mdp->stats.rx_length_errors++;
  783. if (desc_status & RD_RFS4)
  784. mdp->stats.rx_length_errors++;
  785. if (desc_status & RD_RFS6)
  786. mdp->stats.rx_missed_errors++;
  787. if (desc_status & RD_RFS10)
  788. mdp->stats.rx_over_errors++;
  789. } else {
  790. if (!mdp->cd->hw_swap)
  791. sh_eth_soft_swap(
  792. phys_to_virt(ALIGN(rxdesc->addr, 4)),
  793. pkt_len + 2);
  794. skb = mdp->rx_skbuff[entry];
  795. mdp->rx_skbuff[entry] = NULL;
  796. if (mdp->cd->rpadir)
  797. skb_reserve(skb, NET_IP_ALIGN);
  798. skb_put(skb, pkt_len);
  799. skb->protocol = eth_type_trans(skb, ndev);
  800. netif_rx(skb);
  801. mdp->stats.rx_packets++;
  802. mdp->stats.rx_bytes += pkt_len;
  803. }
  804. rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
  805. entry = (++mdp->cur_rx) % RX_RING_SIZE;
  806. rxdesc = &mdp->rx_ring[entry];
  807. }
  808. /* Refill the Rx ring buffers. */
  809. for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
  810. entry = mdp->dirty_rx % RX_RING_SIZE;
  811. rxdesc = &mdp->rx_ring[entry];
  812. /* The buffer length is rounded up to a 16-byte boundary. */
  813. rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
  814. if (mdp->rx_skbuff[entry] == NULL) {
  815. skb = dev_alloc_skb(mdp->rx_buf_sz);
  816. mdp->rx_skbuff[entry] = skb;
  817. if (skb == NULL)
  818. break; /* Better luck next round. */
  819. dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
  820. DMA_FROM_DEVICE);
  821. skb->dev = ndev;
  822. sh_eth_set_receive_align(skb);
  823. skb_checksum_none_assert(skb);
  824. rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
  825. }
  826. if (entry >= RX_RING_SIZE - 1)
  827. rxdesc->status |=
  828. cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
  829. else
  830. rxdesc->status |=
  831. cpu_to_edmac(mdp, RD_RACT | RD_RFP);
  832. }
  833. /* Restart Rx engine if stopped. */
  834. /* If we don't need to check status, don't. -KDU */
  835. if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
  836. sh_eth_write(ndev, EDRRR_R, EDRRR);
  837. return 0;
  838. }
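/*
 * The receive loop above processes at most one full ring's worth of
 * descriptors per call (boguscnt), hands completed frames to the stack
 * with netif_rx(), then refills empty slots and restarts the EDMAC's
 * receive request (EDRRR_R) if it had stopped.
 */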
  839. static void sh_eth_rcv_snd_disable(struct net_device *ndev)
  840. {
  841. /* disable tx and rx */
  842. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
  843. ~(ECMR_RE | ECMR_TE), ECMR);
  844. }
  845. static void sh_eth_rcv_snd_enable(struct net_device *ndev)
  846. {
  847. /* enable tx and rx */
  848. sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
  849. (ECMR_RE | ECMR_TE), ECMR);
  850. }
  851. /* error control function */
  852. static void sh_eth_error(struct net_device *ndev, int intr_status)
  853. {
  854. struct sh_eth_private *mdp = netdev_priv(ndev);
  855. u32 felic_stat;
  856. u32 link_stat;
  857. u32 mask;
  858. if (intr_status & EESR_ECI) {
  859. felic_stat = sh_eth_read(ndev, ECSR);
  860. sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
  861. if (felic_stat & ECSR_ICD)
  862. mdp->stats.tx_carrier_errors++;
  863. if (felic_stat & ECSR_LCHNG) {
  864. /* Link Changed */
  865. if (mdp->cd->no_psr || mdp->no_ether_link) {
  866. if (mdp->link == PHY_DOWN)
  867. link_stat = 0;
  868. else
  869. link_stat = PHY_ST_LINK;
  870. } else {
  871. link_stat = (sh_eth_read(ndev, PSR));
  872. if (mdp->ether_link_active_low)
  873. link_stat = ~link_stat;
  874. }
  875. if (!(link_stat & PHY_ST_LINK))
  876. sh_eth_rcv_snd_disable(ndev);
  877. else {
  878. /* Link Up */
  879. sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
  880. ~DMAC_M_ECI, EESIPR);
  881. /*clear int */
  882. sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
  883. ECSR);
  884. sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
  885. DMAC_M_ECI, EESIPR);
  886. /* enable tx and rx */
  887. sh_eth_rcv_snd_enable(ndev);
  888. }
  889. }
  890. }
  891. if (intr_status & EESR_TWB) {
  892. /* Write back end. Unused write-back interrupt */
  893. if (intr_status & EESR_TABT) /* Transmit Abort int */
  894. mdp->stats.tx_aborted_errors++;
  895. if (netif_msg_tx_err(mdp))
  896. dev_err(&ndev->dev, "Transmit Abort\n");
  897. }
  898. if (intr_status & EESR_RABT) {
  899. /* Receive Abort int */
  900. if (intr_status & EESR_RFRMER) {
  901. /* Receive Frame Overflow int */
  902. mdp->stats.rx_frame_errors++;
  903. if (netif_msg_rx_err(mdp))
  904. dev_err(&ndev->dev, "Receive Abort\n");
  905. }
  906. }
  907. if (intr_status & EESR_TDE) {
  908. /* Transmit Descriptor Empty int */
  909. mdp->stats.tx_fifo_errors++;
  910. if (netif_msg_tx_err(mdp))
  911. dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
  912. }
  913. if (intr_status & EESR_TFE) {
  914. /* FIFO under flow */
  915. mdp->stats.tx_fifo_errors++;
  916. if (netif_msg_tx_err(mdp))
  917. dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
  918. }
  919. if (intr_status & EESR_RDE) {
  920. /* Receive Descriptor Empty int */
  921. mdp->stats.rx_over_errors++;
  922. if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
  923. sh_eth_write(ndev, EDRRR_R, EDRRR);
  924. if (netif_msg_rx_err(mdp))
  925. dev_err(&ndev->dev, "Receive Descriptor Empty\n");
  926. }
  927. if (intr_status & EESR_RFE) {
  928. /* Receive FIFO Overflow int */
  929. mdp->stats.rx_fifo_errors++;
  930. if (netif_msg_rx_err(mdp))
  931. dev_err(&ndev->dev, "Receive FIFO Overflow\n");
  932. }
  933. if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
  934. /* Address Error */
  935. mdp->stats.tx_fifo_errors++;
  936. if (netif_msg_tx_err(mdp))
  937. dev_err(&ndev->dev, "Address Error\n");
  938. }
  939. mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
  940. if (mdp->cd->no_ade)
  941. mask &= ~EESR_ADE;
  942. if (intr_status & mask) {
  943. /* Tx error */
  944. u32 edtrr = sh_eth_read(ndev, EDTRR);
  945. /* dmesg */
  946. dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
  947. intr_status, mdp->cur_tx);
  948. dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
  949. mdp->dirty_tx, (u32) ndev->state, edtrr);
  950. /* dirty buffer free */
  951. sh_eth_txfree(ndev);
  952. /* SH7712 BUG */
  953. if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
  954. /* tx dma start */
  955. sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
  956. }
  957. /* wakeup */
  958. netif_wake_queue(ndev);
  959. }
  960. }
  961. static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
  962. {
  963. struct net_device *ndev = netdev;
  964. struct sh_eth_private *mdp = netdev_priv(ndev);
  965. struct sh_eth_cpu_data *cd = mdp->cd;
  966. irqreturn_t ret = IRQ_NONE;
  967. u32 intr_status = 0;
  968. spin_lock(&mdp->lock);
  969. /* Get interrupt status */
  970. intr_status = sh_eth_read(ndev, EESR);
  971. /* Clear interrupt */
  972. if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
  973. EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
  974. cd->tx_check | cd->eesr_err_check)) {
  975. sh_eth_write(ndev, intr_status, EESR);
  976. ret = IRQ_HANDLED;
  977. } else
  978. goto other_irq;
  979. if (intr_status & (EESR_FRC | /* Frame recv*/
  980. EESR_RMAF | /* Multi cast address recv*/
  981. EESR_RRF | /* Bit frame recv */
  982. EESR_RTLF | /* Long frame recv*/
  983. EESR_RTSF | /* short frame recv */
  984. EESR_PRE | /* PHY-LSI recv error */
  985. EESR_CERF)){ /* recv frame CRC error */
  986. sh_eth_rx(ndev);
  987. }
  988. /* Tx Check */
  989. if (intr_status & cd->tx_check) {
  990. sh_eth_txfree(ndev);
  991. netif_wake_queue(ndev);
  992. }
  993. if (intr_status & cd->eesr_err_check)
  994. sh_eth_error(ndev, intr_status);
  995. other_irq:
  996. spin_unlock(&mdp->lock);
  997. return ret;
  998. }
  999. static void sh_eth_timer(unsigned long data)
  1000. {
  1001. struct net_device *ndev = (struct net_device *)data;
  1002. struct sh_eth_private *mdp = netdev_priv(ndev);
  1003. mod_timer(&mdp->timer, jiffies + (10 * HZ));
  1004. }
  1005. /* PHY state control function */
  1006. static void sh_eth_adjust_link(struct net_device *ndev)
  1007. {
  1008. struct sh_eth_private *mdp = netdev_priv(ndev);
  1009. struct phy_device *phydev = mdp->phydev;
  1010. int new_state = 0;
  1011. if (phydev->link != PHY_DOWN) {
  1012. if (phydev->duplex != mdp->duplex) {
  1013. new_state = 1;
  1014. mdp->duplex = phydev->duplex;
  1015. if (mdp->cd->set_duplex)
  1016. mdp->cd->set_duplex(ndev);
  1017. }
  1018. if (phydev->speed != mdp->speed) {
  1019. new_state = 1;
  1020. mdp->speed = phydev->speed;
  1021. if (mdp->cd->set_rate)
  1022. mdp->cd->set_rate(ndev);
  1023. }
  1024. if (mdp->link == PHY_DOWN) {
  1025. sh_eth_write(ndev,
  1026. (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
  1027. new_state = 1;
  1028. mdp->link = phydev->link;
  1029. }
  1030. } else if (mdp->link) {
  1031. new_state = 1;
  1032. mdp->link = PHY_DOWN;
  1033. mdp->speed = 0;
  1034. mdp->duplex = -1;
  1035. }
  1036. if (new_state && netif_msg_link(mdp))
  1037. phy_print_status(phydev);
  1038. }
  1039. /* PHY init function */
  1040. static int sh_eth_phy_init(struct net_device *ndev)
  1041. {
  1042. struct sh_eth_private *mdp = netdev_priv(ndev);
  1043. char phy_id[MII_BUS_ID_SIZE + 3];
  1044. struct phy_device *phydev = NULL;
  1045. snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
  1046. mdp->mii_bus->id , mdp->phy_id);
  1047. mdp->link = PHY_DOWN;
  1048. mdp->speed = 0;
  1049. mdp->duplex = -1;
  1050. /* Try connect to PHY */
  1051. phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
  1052. 0, mdp->phy_interface);
  1053. if (IS_ERR(phydev)) {
  1054. dev_err(&ndev->dev, "phy_connect failed\n");
  1055. return PTR_ERR(phydev);
  1056. }
  1057. dev_info(&ndev->dev, "attached phy %i to driver %s\n",
  1058. phydev->addr, phydev->drv->name);
  1059. mdp->phydev = phydev;
  1060. return 0;
  1061. }
  1062. /* PHY control start function */
  1063. static int sh_eth_phy_start(struct net_device *ndev)
  1064. {
  1065. struct sh_eth_private *mdp = netdev_priv(ndev);
  1066. int ret;
  1067. ret = sh_eth_phy_init(ndev);
  1068. if (ret)
  1069. return ret;
  1070. /* reset phy - this also wakes it from PDOWN */
  1071. phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
  1072. phy_start(mdp->phydev);
  1073. return 0;
  1074. }
  1075. static int sh_eth_get_settings(struct net_device *ndev,
  1076. struct ethtool_cmd *ecmd)
  1077. {
  1078. struct sh_eth_private *mdp = netdev_priv(ndev);
  1079. unsigned long flags;
  1080. int ret;
  1081. spin_lock_irqsave(&mdp->lock, flags);
  1082. ret = phy_ethtool_gset(mdp->phydev, ecmd);
  1083. spin_unlock_irqrestore(&mdp->lock, flags);
  1084. return ret;
  1085. }
  1086. static int sh_eth_set_settings(struct net_device *ndev,
  1087. struct ethtool_cmd *ecmd)
  1088. {
  1089. struct sh_eth_private *mdp = netdev_priv(ndev);
  1090. unsigned long flags;
  1091. int ret;
  1092. spin_lock_irqsave(&mdp->lock, flags);
  1093. /* disable tx and rx */
  1094. sh_eth_rcv_snd_disable(ndev);
  1095. ret = phy_ethtool_sset(mdp->phydev, ecmd);
  1096. if (ret)
  1097. goto error_exit;
  1098. if (ecmd->duplex == DUPLEX_FULL)
  1099. mdp->duplex = 1;
  1100. else
  1101. mdp->duplex = 0;
  1102. if (mdp->cd->set_duplex)
  1103. mdp->cd->set_duplex(ndev);
  1104. error_exit:
  1105. mdelay(1);
  1106. /* enable tx and rx */
  1107. sh_eth_rcv_snd_enable(ndev);
  1108. spin_unlock_irqrestore(&mdp->lock, flags);
  1109. return ret;
  1110. }
  1111. static int sh_eth_nway_reset(struct net_device *ndev)
  1112. {
  1113. struct sh_eth_private *mdp = netdev_priv(ndev);
  1114. unsigned long flags;
  1115. int ret;
  1116. spin_lock_irqsave(&mdp->lock, flags);
  1117. ret = phy_start_aneg(mdp->phydev);
  1118. spin_unlock_irqrestore(&mdp->lock, flags);
  1119. return ret;
  1120. }
  1121. static u32 sh_eth_get_msglevel(struct net_device *ndev)
  1122. {
  1123. struct sh_eth_private *mdp = netdev_priv(ndev);
  1124. return mdp->msg_enable;
  1125. }
  1126. static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
  1127. {
  1128. struct sh_eth_private *mdp = netdev_priv(ndev);
  1129. mdp->msg_enable = value;
  1130. }
  1131. static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
  1132. "rx_current", "tx_current",
  1133. "rx_dirty", "tx_dirty",
  1134. };
  1135. #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
  1136. static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
  1137. {
  1138. switch (sset) {
  1139. case ETH_SS_STATS:
  1140. return SH_ETH_STATS_LEN;
  1141. default:
  1142. return -EOPNOTSUPP;
  1143. }
  1144. }
  1145. static void sh_eth_get_ethtool_stats(struct net_device *ndev,
  1146. struct ethtool_stats *stats, u64 *data)
  1147. {
  1148. struct sh_eth_private *mdp = netdev_priv(ndev);
  1149. int i = 0;
  1150. /* device-specific stats */
  1151. data[i++] = mdp->cur_rx;
  1152. data[i++] = mdp->cur_tx;
  1153. data[i++] = mdp->dirty_rx;
  1154. data[i++] = mdp->dirty_tx;
  1155. }
  1156. static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
  1157. {
  1158. switch (stringset) {
  1159. case ETH_SS_STATS:
  1160. memcpy(data, *sh_eth_gstrings_stats,
  1161. sizeof(sh_eth_gstrings_stats));
  1162. break;
  1163. }
  1164. }
  1165. static const struct ethtool_ops sh_eth_ethtool_ops = {
  1166. .get_settings = sh_eth_get_settings,
  1167. .set_settings = sh_eth_set_settings,
  1168. .nway_reset = sh_eth_nway_reset,
  1169. .get_msglevel = sh_eth_get_msglevel,
  1170. .set_msglevel = sh_eth_set_msglevel,
  1171. .get_link = ethtool_op_get_link,
  1172. .get_strings = sh_eth_get_strings,
  1173. .get_ethtool_stats = sh_eth_get_ethtool_stats,
  1174. .get_sset_count = sh_eth_get_sset_count,
  1175. };
  1176. /* network device open function */
  1177. static int sh_eth_open(struct net_device *ndev)
  1178. {
  1179. int ret = 0;
  1180. struct sh_eth_private *mdp = netdev_priv(ndev);
  1181. pm_runtime_get_sync(&mdp->pdev->dev);
  1182. ret = request_irq(ndev->irq, sh_eth_interrupt,
  1183. #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
  1184. defined(CONFIG_CPU_SUBTYPE_SH7764) || \
  1185. defined(CONFIG_CPU_SUBTYPE_SH7757)
  1186. IRQF_SHARED,
  1187. #else
  1188. 0,
  1189. #endif
  1190. ndev->name, ndev);
  1191. if (ret) {
  1192. dev_err(&ndev->dev, "Can not assign IRQ number\n");
  1193. return ret;
  1194. }
  1195. /* Descriptor set */
  1196. ret = sh_eth_ring_init(ndev);
  1197. if (ret)
  1198. goto out_free_irq;
  1199. /* device init */
  1200. ret = sh_eth_dev_init(ndev);
  1201. if (ret)
  1202. goto out_free_irq;
  1203. /* PHY control start*/
  1204. ret = sh_eth_phy_start(ndev);
  1205. if (ret)
  1206. goto out_free_irq;
  1207. /* Set the timer to check for link beat. */
  1208. init_timer(&mdp->timer);
  1209. mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
  1210. setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
  1211. return ret;
  1212. out_free_irq:
  1213. free_irq(ndev->irq, ndev);
  1214. pm_runtime_put_sync(&mdp->pdev->dev);
  1215. return ret;
  1216. }
  1217. /* Timeout function */
  1218. static void sh_eth_tx_timeout(struct net_device *ndev)
  1219. {
  1220. struct sh_eth_private *mdp = netdev_priv(ndev);
  1221. struct sh_eth_rxdesc *rxdesc;
  1222. int i;
  1223. netif_stop_queue(ndev);
  1224. if (netif_msg_timer(mdp))
  1225. dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
  1226. " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
  1227. /* tx_errors count up */
  1228. mdp->stats.tx_errors++;
  1229. /* timer off */
  1230. del_timer_sync(&mdp->timer);
  1231. /* Free all the skbuffs in the Rx queue. */
  1232. for (i = 0; i < RX_RING_SIZE; i++) {
  1233. rxdesc = &mdp->rx_ring[i];
  1234. rxdesc->status = 0;
  1235. rxdesc->addr = 0xBADF00D0;
  1236. if (mdp->rx_skbuff[i])
  1237. dev_kfree_skb(mdp->rx_skbuff[i]);
  1238. mdp->rx_skbuff[i] = NULL;
  1239. }
  1240. for (i = 0; i < TX_RING_SIZE; i++) {
  1241. if (mdp->tx_skbuff[i])
  1242. dev_kfree_skb(mdp->tx_skbuff[i]);
  1243. mdp->tx_skbuff[i] = NULL;
  1244. }
  1245. /* device init */
  1246. sh_eth_dev_init(ndev);
  1247. /* timer on */
  1248. mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
  1249. add_timer(&mdp->timer);
  1250. }
  1251. /* Packet transmit function */
  1252. static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  1253. {
  1254. struct sh_eth_private *mdp = netdev_priv(ndev);
  1255. struct sh_eth_txdesc *txdesc;
  1256. u32 entry;
  1257. unsigned long flags;
  1258. spin_lock_irqsave(&mdp->lock, flags);
  1259. if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
  1260. if (!sh_eth_txfree(ndev)) {
  1261. if (netif_msg_tx_queued(mdp))
  1262. dev_warn(&ndev->dev, "TxFD exhausted.\n");
  1263. netif_stop_queue(ndev);
  1264. spin_unlock_irqrestore(&mdp->lock, flags);
  1265. return NETDEV_TX_BUSY;
  1266. }
  1267. }
  1268. spin_unlock_irqrestore(&mdp->lock, flags);
  1269. entry = mdp->cur_tx % TX_RING_SIZE;
  1270. mdp->tx_skbuff[entry] = skb;
  1271. txdesc = &mdp->tx_ring[entry];
  1272. /* soft swap. */
  1273. if (!mdp->cd->hw_swap)
  1274. sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
  1275. skb->len + 2);
  1276. txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
  1277. DMA_TO_DEVICE);
  1278. if (skb->len < ETHERSMALL)
  1279. txdesc->buffer_length = ETHERSMALL;
  1280. else
  1281. txdesc->buffer_length = skb->len;
  1282. if (entry >= TX_RING_SIZE - 1)
  1283. txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
  1284. else
  1285. txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
  1286. mdp->cur_tx++;
  1287. if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
  1288. sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
  1289. return NETDEV_TX_OK;
  1290. }
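/*
 * Note on the transmit path above: frames shorter than ETHERSMALL have
 * their descriptor buffer_length raised to ETHERSMALL so the hardware
 * sends at least a minimum-sized frame, and EDTRR is written only when
 * its transmit-request bit is already clear, i.e. when transmit DMA is
 * idle.
 */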
  1291. /* device close function */
  1292. static int sh_eth_close(struct net_device *ndev)
  1293. {
  1294. struct sh_eth_private *mdp = netdev_priv(ndev);
  1295. int ringsize;
  1296. netif_stop_queue(ndev);
  1297. /* Disable interrupts by clearing the interrupt mask. */
  1298. sh_eth_write(ndev, 0x0000, EESIPR);
  1299. /* Stop the chip's Tx and Rx processes. */
  1300. sh_eth_write(ndev, 0, EDTRR);
  1301. sh_eth_write(ndev, 0, EDRRR);
  1302. /* PHY Disconnect */
  1303. if (mdp->phydev) {
  1304. phy_stop(mdp->phydev);
  1305. phy_disconnect(mdp->phydev);
  1306. }
  1307. free_irq(ndev->irq, ndev);
  1308. del_timer_sync(&mdp->timer);
  1309. /* Free all the skbuffs in the Rx queue. */
  1310. sh_eth_ring_free(ndev);
  1311. /* free DMA buffer */
  1312. ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
  1313. dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
  1314. /* free DMA buffer */
  1315. ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
  1316. dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
  1317. pm_runtime_put_sync(&mdp->pdev->dev);
  1318. return 0;
  1319. }
  1320. static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
  1321. {
  1322. struct sh_eth_private *mdp = netdev_priv(ndev);
  1323. pm_runtime_get_sync(&mdp->pdev->dev);
  1324. mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
  1325. sh_eth_write(ndev, 0, TROCR); /* (write clear) */
  1326. mdp->stats.collisions += sh_eth_read(ndev, CDCR);
  1327. sh_eth_write(ndev, 0, CDCR); /* (write clear) */
  1328. mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
  1329. sh_eth_write(ndev, 0, LCCR); /* (write clear) */
  1330. if (sh_eth_is_gether(mdp)) {
  1331. mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
  1332. sh_eth_write(ndev, 0, CERCR); /* (write clear) */
  1333. mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
  1334. sh_eth_write(ndev, 0, CEECR); /* (write clear) */
  1335. } else {
  1336. mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
  1337. sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
  1338. }
  1339. pm_runtime_put_sync(&mdp->pdev->dev);
  1340. return &mdp->stats;
  1341. }
  1342. /* ioctl to device function */
  1343. static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
  1344. int cmd)
  1345. {
  1346. struct sh_eth_private *mdp = netdev_priv(ndev);
  1347. struct phy_device *phydev = mdp->phydev;
  1348. if (!netif_running(ndev))
  1349. return -EINVAL;
  1350. if (!phydev)
  1351. return -ENODEV;
  1352. return phy_mii_ioctl(phydev, rq, cmd);
  1353. }
  1354. #if defined(SH_ETH_HAS_TSU)
  1355. /* Set multicast reception mode */
  1356. static void sh_eth_set_multicast_list(struct net_device *ndev)
  1357. {
  1358. if (ndev->flags & IFF_PROMISC) {
  1359. /* Set promiscuous. */
  1360. sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
  1361. ECMR_PRM, ECMR);
  1362. } else {
  1363. /* Normal, unicast/broadcast-only mode. */
  1364. sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
  1365. ECMR_MCT, ECMR);
  1366. }
  1367. }
  1368. #endif /* SH_ETH_HAS_TSU */
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward (port 0 -> 1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward (port 1 -> 0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward FIFO 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG (port 0 -> 1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG (port 1 -> 0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG (port 0 -> 1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG (port 1 -> 0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* Clear all interrupt status */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupts */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entries */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entries [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entries [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entries [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entries [24-31] */
}

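/*
 * After this init the TSU has port forwarding, QTAG handling, all of its
 * interrupts and every CAM entry disabled, so later address/multicast setup
 * starts from a known quiet state.
 */
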
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init: the masks select the MDI/MDO/MMD/MDC bits of PIR */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;	/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

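/*
 * Note: every PHY address on the bus is set to PHY_POLL, so link changes are
 * detected by polling rather than by a dedicated PHY interrupt line.  On
 * failure the labels above unwind in reverse allocation order (IRQ table,
 * MDIO bus, bb_info).
 */
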
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

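/*
 * Note: the returned table translates the driver's generic register indices
 * into per-SoC offsets; for an unknown register_type this returns NULL, which
 * sh_eth_drv_probe() below stores in mdp->reg_offset without checking.
 */
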
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

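/*
 * Note: .ndo_set_rx_mode is only wired up when SH_ETH_HAS_TSU is defined;
 * otherwise sh_eth_set_multicast_list() is not compiled in at all and the
 * networking core has no way to toggle promiscuous mode through this driver.
 */
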
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate device.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->addr = ioremap(res->start, resource_size(res));
	if (mdp->addr == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap failed.\n");
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* First device only init */
	if (!devno) {
		if (mdp->cd->tsu) {
			struct resource *rtsu;
			rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
			if (!rtsu) {
				dev_err(&pdev->dev, "TSU resource not found\n");
				ret = -ENODEV;
				goto out_release;
			}
			mdp->tsu_addr = ioremap(rtsu->start,
						resource_size(rtsu));
		}
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (mdp && mdp->addr)
		iounmap(mdp->addr);
	if (mdp && mdp->tsu_addr)
		iounmap(mdp->tsu_addr);
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

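/*
 * Note on probe ordering: register_netdev() runs before sh_mdio_init(), so
 * the interface is visible before its MDIO bus exists; if the MDIO init then
 * fails, the out_unregister label tears the net device back down.  The TSU
 * mapping and init are done only for the first device (devno == 0), since the
 * TSU block appears to be shared between channels.
 */
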
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	iounmap(mdp->tsu_addr);
	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	iounmap(mdp->addr);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		.name = CARDNAME,
		.pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");