sh_eth.c

  1. /*
  2. * SuperH Ethernet device driver
  3. *
  4. * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
  5. * Copyright (C) 2008-2009 Renesas Solutions Corp.
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms and conditions of the GNU General Public License,
  9. * version 2, as published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope it will be useful, but WITHOUT
  12. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  14. * more details.
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc.,
  17. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18. *
  19. * The full GNU General Public License is included in this distribution in
  20. * the file called "COPYING".
  21. */
  22. #include <linux/init.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/etherdevice.h>
  25. #include <linux/delay.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/mdio-bitbang.h>
  28. #include <linux/netdevice.h>
  29. #include <linux/phy.h>
  30. #include <linux/cache.h>
  31. #include <linux/io.h>
  32. #include <linux/pm_runtime.h>
  33. #include <linux/slab.h>
  34. #include <linux/ethtool.h>
  35. #include <asm/cacheflush.h>
  36. #include "sh_eth.h"
  37. #define SH_ETH_DEF_MSG_ENABLE \
  38. (NETIF_MSG_LINK | \
  39. NETIF_MSG_TIMER | \
  40. NETIF_MSG_RX_ERR| \
  41. NETIF_MSG_TX_ERR)
  42. /* There is CPU dependent code */
  43. #if defined(CONFIG_CPU_SUBTYPE_SH7724)
  44. #define SH_ETH_RESET_DEFAULT 1
  45. static void sh_eth_set_duplex(struct net_device *ndev)
  46. {
  47. struct sh_eth_private *mdp = netdev_priv(ndev);
  48. u32 ioaddr = ndev->base_addr;
  49. if (mdp->duplex) /* Full */
  50. writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
  51. else /* Half */
  52. writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
  53. }
  54. static void sh_eth_set_rate(struct net_device *ndev)
  55. {
  56. struct sh_eth_private *mdp = netdev_priv(ndev);
  57. u32 ioaddr = ndev->base_addr;
  58. switch (mdp->speed) {
  59. case 10: /* 10BASE */
  60. writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
  61. break;
  62. case 100:/* 100BASE */
  63. writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
  64. break;
  65. default:
  66. break;
  67. }
  68. }
  69. /* SH7724 */
  70. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  71. .set_duplex = sh_eth_set_duplex,
  72. .set_rate = sh_eth_set_rate,
  73. .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
  74. .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
  75. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
  76. .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
  77. .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
  78. EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
  79. .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
  80. .apr = 1,
  81. .mpr = 1,
  82. .tpauser = 1,
  83. .hw_swap = 1,
  84. .rpadir = 1,
  85. .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
  86. };
  87. #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
  88. #define SH_ETH_RESET_DEFAULT 1
  89. static void sh_eth_set_duplex(struct net_device *ndev)
  90. {
  91. struct sh_eth_private *mdp = netdev_priv(ndev);
  92. u32 ioaddr = ndev->base_addr;
  93. if (mdp->duplex) /* Full */
  94. writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
  95. else /* Half */
  96. writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
  97. }
  98. static void sh_eth_set_rate(struct net_device *ndev)
  99. {
  100. struct sh_eth_private *mdp = netdev_priv(ndev);
  101. u32 ioaddr = ndev->base_addr;
  102. switch (mdp->speed) {
  103. case 10: /* 10BASE */
  104. writel(0, ioaddr + RTRATE);
  105. break;
  106. case 100:/* 100BASE */
  107. writel(1, ioaddr + RTRATE);
  108. break;
  109. default:
  110. break;
  111. }
  112. }
  113. /* SH7757 */
  114. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  115. .set_duplex = sh_eth_set_duplex,
  116. .set_rate = sh_eth_set_rate,
  117. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  118. .rmcr_value = 0x00000001,
  119. .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
  120. .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
  121. EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
  122. .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
  123. .apr = 1,
  124. .mpr = 1,
  125. .tpauser = 1,
  126. .hw_swap = 1,
  127. .no_ade = 1,
  128. };
  129. #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
  130. #define SH_ETH_HAS_TSU 1
  131. static void sh_eth_chip_reset(struct net_device *ndev)
  132. {
  133. /* reset device */
  134. writel(ARSTR_ARSTR, ARSTR);
  135. mdelay(1);
  136. }
  137. static void sh_eth_reset(struct net_device *ndev)
  138. {
  139. u32 ioaddr = ndev->base_addr;
  140. int cnt = 100;
  141. writel(EDSR_ENALL, ioaddr + EDSR);
  142. writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
  143. while (cnt > 0) {
  144. if (!(readl(ioaddr + EDMR) & 0x3))
  145. break;
  146. mdelay(1);
  147. cnt--;
  148. }
  149. if (cnt == 0)
  150. printk(KERN_ERR "Device reset failed\n");
  151. /* Table Init */
  152. writel(0x0, ioaddr + TDLAR);
  153. writel(0x0, ioaddr + TDFAR);
  154. writel(0x0, ioaddr + TDFXR);
  155. writel(0x0, ioaddr + TDFFR);
  156. writel(0x0, ioaddr + RDLAR);
  157. writel(0x0, ioaddr + RDFAR);
  158. writel(0x0, ioaddr + RDFXR);
  159. writel(0x0, ioaddr + RDFFR);
  160. }
  161. static void sh_eth_set_duplex(struct net_device *ndev)
  162. {
  163. struct sh_eth_private *mdp = netdev_priv(ndev);
  164. u32 ioaddr = ndev->base_addr;
  165. if (mdp->duplex) /* Full */
  166. writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
  167. else /* Half */
  168. writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
  169. }
  170. static void sh_eth_set_rate(struct net_device *ndev)
  171. {
  172. struct sh_eth_private *mdp = netdev_priv(ndev);
  173. u32 ioaddr = ndev->base_addr;
  174. switch (mdp->speed) {
  175. case 10: /* 10BASE */
  176. writel(GECMR_10, ioaddr + GECMR);
  177. break;
  178. case 100:/* 100BASE */
  179. writel(GECMR_100, ioaddr + GECMR);
  180. break;
  181. case 1000: /* 1000BASE */
  182. writel(GECMR_1000, ioaddr + GECMR);
  183. break;
  184. default:
  185. break;
  186. }
  187. }
  188. /* sh7763 */
  189. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  190. .chip_reset = sh_eth_chip_reset,
  191. .set_duplex = sh_eth_set_duplex,
  192. .set_rate = sh_eth_set_rate,
  193. .ecsr_value = ECSR_ICD | ECSR_MPD,
  194. .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
  195. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  196. .tx_check = EESR_TC1 | EESR_FTC,
  197. .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
  198. EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
  199. EESR_ECI,
  200. .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
  201. EESR_TFE,
  202. .apr = 1,
  203. .mpr = 1,
  204. .tpauser = 1,
  205. .bculr = 1,
  206. .hw_swap = 1,
  207. .no_trimd = 1,
  208. .no_ade = 1,
  209. };
  210. #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
  211. #define SH_ETH_RESET_DEFAULT 1
  212. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  213. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  214. .apr = 1,
  215. .mpr = 1,
  216. .tpauser = 1,
  217. .hw_swap = 1,
  218. };
  219. #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
  220. #define SH_ETH_RESET_DEFAULT 1
  221. #define SH_ETH_HAS_TSU 1
  222. static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
  223. .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
  224. };
  225. #endif
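/* Fill in any cpu_data fields the per-SoC definitions above left unset */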
  226. static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
  227. {
  228. if (!cd->ecsr_value)
  229. cd->ecsr_value = DEFAULT_ECSR_INIT;
  230. if (!cd->ecsipr_value)
  231. cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
  232. if (!cd->fcftr_value)
  233. cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
  234. DEFAULT_FIFO_F_D_RFD;
  235. if (!cd->fdr_value)
  236. cd->fdr_value = DEFAULT_FDR_INIT;
  237. if (!cd->rmcr_value)
  238. cd->rmcr_value = DEFAULT_RMCR_VALUE;
  239. if (!cd->tx_check)
  240. cd->tx_check = DEFAULT_TX_CHECK;
  241. if (!cd->eesr_err_check)
  242. cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
  243. if (!cd->tx_error_check)
  244. cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
  245. }
  246. #if defined(SH_ETH_RESET_DEFAULT)
  247. /* Chip Reset */
  248. static void sh_eth_reset(struct net_device *ndev)
  249. {
  250. u32 ioaddr = ndev->base_addr;
  251. writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
  252. mdelay(3);
  253. writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
  254. }
  255. #endif
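/* Reserve skb headroom so the Rx buffer meets the alignment the EDMAC expects
 * (SH4_SKB_RX_ALIGN on SH-4, SH2_SH3_SKB_RX_ALIGN on SH-2/SH-3) */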
  256. #if defined(CONFIG_CPU_SH4)
  257. static void sh_eth_set_receive_align(struct sk_buff *skb)
  258. {
  259. int reserve;
  260. reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
  261. if (reserve)
  262. skb_reserve(skb, reserve);
  263. }
  264. #else
  265. static void sh_eth_set_receive_align(struct sk_buff *skb)
  266. {
  267. skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
  268. }
  269. #endif
  270. /* CPU <-> EDMAC endian convert */
  271. static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
  272. {
  273. switch (mdp->edmac_endian) {
  274. case EDMAC_LITTLE_ENDIAN:
  275. return cpu_to_le32(x);
  276. case EDMAC_BIG_ENDIAN:
  277. return cpu_to_be32(x);
  278. }
  279. return x;
  280. }
  281. static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
  282. {
  283. switch (mdp->edmac_endian) {
  284. case EDMAC_LITTLE_ENDIAN:
  285. return le32_to_cpu(x);
  286. case EDMAC_BIG_ENDIAN:
  287. return be32_to_cpu(x);
  288. }
  289. return x;
  290. }
  291. /*
  292. * Program the hardware MAC address from dev->dev_addr.
  293. */
  294. static void update_mac_address(struct net_device *ndev)
  295. {
  296. u32 ioaddr = ndev->base_addr;
  297. writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
  298. (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
  299. ioaddr + MAHR);
  300. writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
  301. ioaddr + MALR);
  302. }
  303. /*
  304. * Get MAC address from SuperH MAC address register
  305. *
  306. * SuperH Ethernet controllers have no ROM that holds the MAC address.
  307. * This driver reads the MAC address programmed by the bootloader (U-Boot or sh-ipl+g).
  308. * Before using this device, the MAC address must be set in the bootloader.
  309. *
  310. */
  311. static void read_mac_address(struct net_device *ndev, unsigned char *mac)
  312. {
  313. u32 ioaddr = ndev->base_addr;
  314. if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
  315. memcpy(ndev->dev_addr, mac, 6);
  316. } else {
  317. ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
  318. ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
  319. ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
  320. ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
  321. ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
  322. ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
  323. }
  324. }
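/* Bit-banged MDIO state: address of the PIR register plus the pin masks used below */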
  325. struct bb_info {
  326. struct mdiobb_ctrl ctrl;
  327. u32 addr;
  328. u32 mmd_msk;/* MMD */
  329. u32 mdo_msk;
  330. u32 mdi_msk;
  331. u32 mdc_msk;
  332. };
  333. /* PHY bit set */
  334. static void bb_set(u32 addr, u32 msk)
  335. {
  336. writel(readl(addr) | msk, addr);
  337. }
  338. /* PHY bit clear */
  339. static void bb_clr(u32 addr, u32 msk)
  340. {
  341. writel((readl(addr) & ~msk), addr);
  342. }
  343. /* PHY bit read */
  344. static int bb_read(u32 addr, u32 msk)
  345. {
  346. return (readl(addr) & msk) != 0;
  347. }
  348. /* Data I/O pin control */
  349. static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
  350. {
  351. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  352. if (bit)
  353. bb_set(bitbang->addr, bitbang->mmd_msk);
  354. else
  355. bb_clr(bitbang->addr, bitbang->mmd_msk);
  356. }
  357. /* Set bit data */
  358. static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
  359. {
  360. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  361. if (bit)
  362. bb_set(bitbang->addr, bitbang->mdo_msk);
  363. else
  364. bb_clr(bitbang->addr, bitbang->mdo_msk);
  365. }
  366. /* Get bit data */
  367. static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
  368. {
  369. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  370. return bb_read(bitbang->addr, bitbang->mdi_msk);
  371. }
  372. /* MDC pin control */
  373. static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
  374. {
  375. struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
  376. if (bit)
  377. bb_set(bitbang->addr, bitbang->mdc_msk);
  378. else
  379. bb_clr(bitbang->addr, bitbang->mdc_msk);
  380. }
  381. /* mdio bus control struct */
  382. static struct mdiobb_ops bb_ops = {
  383. .owner = THIS_MODULE,
  384. .set_mdc = sh_mdc_ctrl,
  385. .set_mdio_dir = sh_mmd_ctrl,
  386. .set_mdio_data = sh_set_mdio,
  387. .get_mdio_data = sh_get_mdio,
  388. };
  389. /* free skb and descriptor buffer */
  390. static void sh_eth_ring_free(struct net_device *ndev)
  391. {
  392. struct sh_eth_private *mdp = netdev_priv(ndev);
  393. int i;
  394. /* Free Rx skb ringbuffer */
  395. if (mdp->rx_skbuff) {
  396. for (i = 0; i < RX_RING_SIZE; i++) {
  397. if (mdp->rx_skbuff[i])
  398. dev_kfree_skb(mdp->rx_skbuff[i]);
  399. }
  400. }
  401. kfree(mdp->rx_skbuff);
  402. /* Free Tx skb ringbuffer */
  403. if (mdp->tx_skbuff) {
  404. for (i = 0; i < TX_RING_SIZE; i++) {
  405. if (mdp->tx_skbuff[i])
  406. dev_kfree_skb(mdp->tx_skbuff[i]);
  407. }
  408. }
  409. kfree(mdp->tx_skbuff);
  410. }
  411. /* format skb and descriptor buffer */
  412. static void sh_eth_ring_format(struct net_device *ndev)
  413. {
  414. u32 ioaddr = ndev->base_addr;
  415. struct sh_eth_private *mdp = netdev_priv(ndev);
  416. int i;
  417. struct sk_buff *skb;
  418. struct sh_eth_rxdesc *rxdesc = NULL;
  419. struct sh_eth_txdesc *txdesc = NULL;
  420. int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
  421. int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
  422. mdp->cur_rx = mdp->cur_tx = 0;
  423. mdp->dirty_rx = mdp->dirty_tx = 0;
  424. memset(mdp->rx_ring, 0, rx_ringsize);
  425. /* build Rx ring buffer */
  426. for (i = 0; i < RX_RING_SIZE; i++) {
  427. /* skb */
  428. mdp->rx_skbuff[i] = NULL;
  429. skb = dev_alloc_skb(mdp->rx_buf_sz);
  430. mdp->rx_skbuff[i] = skb;
  431. if (skb == NULL)
  432. break;
  433. dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
  434. DMA_FROM_DEVICE);
  435. skb->dev = ndev; /* Mark as being used by this device. */
  436. sh_eth_set_receive_align(skb);
  437. /* RX descriptor */
  438. rxdesc = &mdp->rx_ring[i];
  439. rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
  440. rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
  441. /* The buffer size must be a multiple of 16 bytes. */
  442. rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
  443. /* Rx descriptor address set */
  444. if (i == 0) {
  445. writel(mdp->rx_desc_dma, ioaddr + RDLAR);
  446. #if defined(CONFIG_CPU_SUBTYPE_SH7763)
  447. writel(mdp->rx_desc_dma, ioaddr + RDFAR);
  448. #endif
  449. }
  450. }
  451. mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
  452. /* Mark the last entry as wrapping the ring. */
  453. rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
  454. memset(mdp->tx_ring, 0, tx_ringsize);
  455. /* build Tx ring buffer */
  456. for (i = 0; i < TX_RING_SIZE; i++) {
  457. mdp->tx_skbuff[i] = NULL;
  458. txdesc = &mdp->tx_ring[i];
  459. txdesc->status = cpu_to_edmac(mdp, TD_TFP);
  460. txdesc->buffer_length = 0;
  461. if (i == 0) {
  462. /* Tx descriptor address set */
  463. writel(mdp->tx_desc_dma, ioaddr + TDLAR);
  464. #if defined(CONFIG_CPU_SUBTYPE_SH7763)
  465. writel(mdp->tx_desc_dma, ioaddr + TDFAR);
  466. #endif
  467. }
  468. }
  469. txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
  470. }
  471. /* Get skb and descriptor buffer */
  472. static int sh_eth_ring_init(struct net_device *ndev)
  473. {
  474. struct sh_eth_private *mdp = netdev_priv(ndev);
  475. int rx_ringsize, tx_ringsize, ret = 0;
  476. /*
  477. * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
  478. * card needs room to do 8 byte alignment, +2 so we can reserve
  479. * the first 2 bytes, and +16 gets room for the status word from the
  480. * card.
  481. */
  482. mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
  483. (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
  484. if (mdp->cd->rpadir)
  485. mdp->rx_buf_sz += NET_IP_ALIGN;
  486. /* Allocate RX and TX skb rings */
  487. mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
  488. GFP_KERNEL);
  489. if (!mdp->rx_skbuff) {
  490. dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
  491. ret = -ENOMEM;
  492. return ret;
  493. }
  494. mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
  495. GFP_KERNEL);
  496. if (!mdp->tx_skbuff) {
  497. dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
  498. ret = -ENOMEM;
  499. goto skb_ring_free;
  500. }
  501. /* Allocate all Rx descriptors. */
  502. rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
  503. mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
  504. GFP_KERNEL);
  505. if (!mdp->rx_ring) {
  506. dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
  507. rx_ringsize);
  508. ret = -ENOMEM;
  509. goto desc_ring_free;
  510. }
  511. mdp->dirty_rx = 0;
  512. /* Allocate all Tx descriptors. */
  513. tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
  514. mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
  515. GFP_KERNEL);
  516. if (!mdp->tx_ring) {
  517. dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
  518. tx_ringsize);
  519. ret = -ENOMEM;
  520. goto desc_ring_free;
  521. }
  522. return ret;
  523. desc_ring_free:
  524. /* free DMA buffer */
  525. dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
  526. skb_ring_free:
  527. /* Free Rx and Tx skb ring buffer */
  528. sh_eth_ring_free(ndev);
  529. return ret;
  530. }
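/* Full hardware (re)initialization: soft reset, descriptor rings,
 * E-MAC/EDMAC register setup, then start reception */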
  531. static int sh_eth_dev_init(struct net_device *ndev)
  532. {
  533. int ret = 0;
  534. struct sh_eth_private *mdp = netdev_priv(ndev);
  535. u32 ioaddr = ndev->base_addr;
  536. u32 rx_int_var, tx_int_var;
  537. u32 val;
  538. /* Soft Reset */
  539. sh_eth_reset(ndev);
  540. /* Descriptor format */
  541. sh_eth_ring_format(ndev);
  542. if (mdp->cd->rpadir)
  543. writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
  544. /* all sh_eth int mask */
  545. writel(0, ioaddr + EESIPR);
  546. #if defined(__LITTLE_ENDIAN__)
  547. if (mdp->cd->hw_swap)
  548. writel(EDMR_EL, ioaddr + EDMR);
  549. else
  550. #endif
  551. writel(0, ioaddr + EDMR);
  552. /* FIFO size set */
  553. writel(mdp->cd->fdr_value, ioaddr + FDR);
  554. writel(0, ioaddr + TFTR);
  555. /* Frame recv control */
  556. writel(mdp->cd->rmcr_value, ioaddr + RMCR);
  557. rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
  558. tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
  559. writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
  560. if (mdp->cd->bculr)
  561. writel(0x800, ioaddr + BCULR); /* Burst cycle set */
  562. writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
  563. if (!mdp->cd->no_trimd)
  564. writel(0, ioaddr + TRIMD);
  565. /* Recv frame limit set register */
  566. writel(RFLR_VALUE, ioaddr + RFLR);
  567. writel(readl(ioaddr + EESR), ioaddr + EESR);
  568. writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
  569. /* PAUSE Prohibition */
  570. val = (readl(ioaddr + ECMR) & ECMR_DM) |
  571. ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
  572. writel(val, ioaddr + ECMR);
  573. if (mdp->cd->set_rate)
  574. mdp->cd->set_rate(ndev);
  575. /* E-MAC Status Register clear */
  576. writel(mdp->cd->ecsr_value, ioaddr + ECSR);
  577. /* E-MAC Interrupt Enable register */
  578. writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
  579. /* Set MAC address */
  580. update_mac_address(ndev);
  581. /* mask reset */
  582. if (mdp->cd->apr)
  583. writel(APR_AP, ioaddr + APR);
  584. if (mdp->cd->mpr)
  585. writel(MPR_MP, ioaddr + MPR);
  586. if (mdp->cd->tpauser)
  587. writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
  588. /* Setting the Rx mode will start the Rx process. */
  589. writel(EDRRR_R, ioaddr + EDRRR);
  590. netif_start_queue(ndev);
  591. return ret;
  592. }
  593. /* free Tx skb function */
  594. static int sh_eth_txfree(struct net_device *ndev)
  595. {
  596. struct sh_eth_private *mdp = netdev_priv(ndev);
  597. struct sh_eth_txdesc *txdesc;
  598. int freeNum = 0;
  599. int entry = 0;
  600. for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
  601. entry = mdp->dirty_tx % TX_RING_SIZE;
  602. txdesc = &mdp->tx_ring[entry];
  603. if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
  604. break;
  605. /* Free the original skb. */
  606. if (mdp->tx_skbuff[entry]) {
  607. dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
  608. mdp->tx_skbuff[entry] = NULL;
  609. freeNum++;
  610. }
  611. txdesc->status = cpu_to_edmac(mdp, TD_TFP);
  612. if (entry >= TX_RING_SIZE - 1)
  613. txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
  614. mdp->stats.tx_packets++;
  615. mdp->stats.tx_bytes += txdesc->buffer_length;
  616. }
  617. return freeNum;
  618. }
  619. /* Packet receive function */
  620. static int sh_eth_rx(struct net_device *ndev)
  621. {
  622. struct sh_eth_private *mdp = netdev_priv(ndev);
  623. struct sh_eth_rxdesc *rxdesc;
  624. int entry = mdp->cur_rx % RX_RING_SIZE;
  625. int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
  626. struct sk_buff *skb;
  627. u16 pkt_len = 0;
  628. u32 desc_status;
  629. rxdesc = &mdp->rx_ring[entry];
  630. while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
  631. desc_status = edmac_to_cpu(mdp, rxdesc->status);
  632. pkt_len = rxdesc->frame_length;
  633. if (--boguscnt < 0)
  634. break;
  635. if (!(desc_status & RDFEND))
  636. mdp->stats.rx_length_errors++;
  637. if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
  638. RD_RFS5 | RD_RFS6 | RD_RFS10)) {
  639. mdp->stats.rx_errors++;
  640. if (desc_status & RD_RFS1)
  641. mdp->stats.rx_crc_errors++;
  642. if (desc_status & RD_RFS2)
  643. mdp->stats.rx_frame_errors++;
  644. if (desc_status & RD_RFS3)
  645. mdp->stats.rx_length_errors++;
  646. if (desc_status & RD_RFS4)
  647. mdp->stats.rx_length_errors++;
  648. if (desc_status & RD_RFS6)
  649. mdp->stats.rx_missed_errors++;
  650. if (desc_status & RD_RFS10)
  651. mdp->stats.rx_over_errors++;
  652. } else {
  653. if (!mdp->cd->hw_swap)
  654. sh_eth_soft_swap(
  655. phys_to_virt(ALIGN(rxdesc->addr, 4)),
  656. pkt_len + 2);
  657. skb = mdp->rx_skbuff[entry];
  658. mdp->rx_skbuff[entry] = NULL;
  659. if (mdp->cd->rpadir)
  660. skb_reserve(skb, NET_IP_ALIGN);
  661. skb_put(skb, pkt_len);
  662. skb->protocol = eth_type_trans(skb, ndev);
  663. netif_rx(skb);
  664. mdp->stats.rx_packets++;
  665. mdp->stats.rx_bytes += pkt_len;
  666. }
  667. rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
  668. entry = (++mdp->cur_rx) % RX_RING_SIZE;
  669. rxdesc = &mdp->rx_ring[entry];
  670. }
  671. /* Refill the Rx ring buffers. */
  672. for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
  673. entry = mdp->dirty_rx % RX_RING_SIZE;
  674. rxdesc = &mdp->rx_ring[entry];
  675. /* The buffer size must be a multiple of 16 bytes. */
  676. rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
  677. if (mdp->rx_skbuff[entry] == NULL) {
  678. skb = dev_alloc_skb(mdp->rx_buf_sz);
  679. mdp->rx_skbuff[entry] = skb;
  680. if (skb == NULL)
  681. break; /* Better luck next round. */
  682. dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
  683. DMA_FROM_DEVICE);
  684. skb->dev = ndev;
  685. sh_eth_set_receive_align(skb);
  686. skb_checksum_none_assert(skb);
  687. rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
  688. }
  689. if (entry >= RX_RING_SIZE - 1)
  690. rxdesc->status |=
  691. cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
  692. else
  693. rxdesc->status |=
  694. cpu_to_edmac(mdp, RD_RACT | RD_RFP);
  695. }
  696. /* Restart Rx engine if stopped. */
  697. /* If we don't need to check status, don't. -KDU */
  698. if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
  699. writel(EDRRR_R, ndev->base_addr + EDRRR);
  700. return 0;
  701. }
  702. static void sh_eth_rcv_snd_disable(u32 ioaddr)
  703. {
  704. /* disable tx and rx */
  705. writel(readl(ioaddr + ECMR) &
  706. ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
  707. }
  708. static void sh_eth_rcv_snd_enable(u32 ioaddr)
  709. {
  710. /* enable tx and rx */
  711. writel(readl(ioaddr + ECMR) |
  712. (ECMR_RE | ECMR_TE), ioaddr + ECMR);
  713. }
  714. /* error control function */
  715. static void sh_eth_error(struct net_device *ndev, int intr_status)
  716. {
  717. struct sh_eth_private *mdp = netdev_priv(ndev);
  718. u32 ioaddr = ndev->base_addr;
  719. u32 felic_stat;
  720. u32 link_stat;
  721. u32 mask;
  722. if (intr_status & EESR_ECI) {
  723. felic_stat = readl(ioaddr + ECSR);
  724. writel(felic_stat, ioaddr + ECSR); /* clear int */
  725. if (felic_stat & ECSR_ICD)
  726. mdp->stats.tx_carrier_errors++;
  727. if (felic_stat & ECSR_LCHNG) {
  728. /* Link Changed */
  729. if (mdp->cd->no_psr || mdp->no_ether_link) {
  730. if (mdp->link == PHY_DOWN)
  731. link_stat = 0;
  732. else
  733. link_stat = PHY_ST_LINK;
  734. } else {
  735. link_stat = (readl(ioaddr + PSR));
  736. if (mdp->ether_link_active_low)
  737. link_stat = ~link_stat;
  738. }
  739. if (!(link_stat & PHY_ST_LINK))
  740. sh_eth_rcv_snd_disable(ioaddr);
  741. else {
  742. /* Link Up */
  743. writel(readl(ioaddr + EESIPR) &
  744. ~DMAC_M_ECI, ioaddr + EESIPR);
  745. /*clear int */
  746. writel(readl(ioaddr + ECSR),
  747. ioaddr + ECSR);
  748. writel(readl(ioaddr + EESIPR) |
  749. DMAC_M_ECI, ioaddr + EESIPR);
  750. /* enable tx and rx */
  751. sh_eth_rcv_snd_enable(ioaddr);
  752. }
  753. }
  754. }
  755. if (intr_status & EESR_TWB) {
  756. /* Write-back end: unused write-back interrupt */
  757. if (intr_status & EESR_TABT) /* Transmit Abort int */
  758. mdp->stats.tx_aborted_errors++;
  759. if (netif_msg_tx_err(mdp))
  760. dev_err(&ndev->dev, "Transmit Abort\n");
  761. }
  762. if (intr_status & EESR_RABT) {
  763. /* Receive Abort int */
  764. if (intr_status & EESR_RFRMER) {
  765. /* Receive Frame Overflow int */
  766. mdp->stats.rx_frame_errors++;
  767. if (netif_msg_rx_err(mdp))
  768. dev_err(&ndev->dev, "Receive Abort\n");
  769. }
  770. }
  771. if (intr_status & EESR_TDE) {
  772. /* Transmit Descriptor Empty int */
  773. mdp->stats.tx_fifo_errors++;
  774. if (netif_msg_tx_err(mdp))
  775. dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
  776. }
  777. if (intr_status & EESR_TFE) {
  778. /* FIFO under flow */
  779. mdp->stats.tx_fifo_errors++;
  780. if (netif_msg_tx_err(mdp))
  781. dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
  782. }
  783. if (intr_status & EESR_RDE) {
  784. /* Receive Descriptor Empty int */
  785. mdp->stats.rx_over_errors++;
  786. if (readl(ioaddr + EDRRR) ^ EDRRR_R)
  787. writel(EDRRR_R, ioaddr + EDRRR);
  788. if (netif_msg_rx_err(mdp))
  789. dev_err(&ndev->dev, "Receive Descriptor Empty\n");
  790. }
  791. if (intr_status & EESR_RFE) {
  792. /* Receive FIFO Overflow int */
  793. mdp->stats.rx_fifo_errors++;
  794. if (netif_msg_rx_err(mdp))
  795. dev_err(&ndev->dev, "Receive FIFO Overflow\n");
  796. }
  797. if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
  798. /* Address Error */
  799. mdp->stats.tx_fifo_errors++;
  800. if (netif_msg_tx_err(mdp))
  801. dev_err(&ndev->dev, "Address Error\n");
  802. }
  803. mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
  804. if (mdp->cd->no_ade)
  805. mask &= ~EESR_ADE;
  806. if (intr_status & mask) {
  807. /* Tx error */
  808. u32 edtrr = readl(ndev->base_addr + EDTRR);
  809. /* dmesg */
  810. dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
  811. intr_status, mdp->cur_tx);
  812. dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
  813. mdp->dirty_tx, (u32) ndev->state, edtrr);
  814. /* dirty buffer free */
  815. sh_eth_txfree(ndev);
  816. /* SH7712 BUG */
  817. if (edtrr ^ EDTRR_TRNS) {
  818. /* tx dma start */
  819. writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
  820. }
  821. /* wakeup */
  822. netif_wake_queue(ndev);
  823. }
  824. }
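/* Interrupt handler: acknowledge the EESR causes, then dispatch to
 * Rx processing, Tx completion and error handling */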
  825. static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
  826. {
  827. struct net_device *ndev = netdev;
  828. struct sh_eth_private *mdp = netdev_priv(ndev);
  829. struct sh_eth_cpu_data *cd = mdp->cd;
  830. irqreturn_t ret = IRQ_NONE;
  831. u32 ioaddr, intr_status = 0;
  832. ioaddr = ndev->base_addr;
  833. spin_lock(&mdp->lock);
  834. /* Get interrupt status */
  835. intr_status = readl(ioaddr + EESR);
  836. /* Clear interrupt */
  837. if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
  838. EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
  839. cd->tx_check | cd->eesr_err_check)) {
  840. writel(intr_status, ioaddr + EESR);
  841. ret = IRQ_HANDLED;
  842. } else
  843. goto other_irq;
  844. if (intr_status & (EESR_FRC | /* Frame recv*/
  845. EESR_RMAF | /* Multi cast address recv*/
  846. EESR_RRF | /* Bit frame recv */
  847. EESR_RTLF | /* Long frame recv*/
  848. EESR_RTSF | /* short frame recv */
  849. EESR_PRE | /* PHY-LSI recv error */
  850. EESR_CERF)){ /* recv frame CRC error */
  851. sh_eth_rx(ndev);
  852. }
  853. /* Tx Check */
  854. if (intr_status & cd->tx_check) {
  855. sh_eth_txfree(ndev);
  856. netif_wake_queue(ndev);
  857. }
  858. if (intr_status & cd->eesr_err_check)
  859. sh_eth_error(ndev, intr_status);
  860. other_irq:
  861. spin_unlock(&mdp->lock);
  862. return ret;
  863. }
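/* Link-beat check timer: currently it only re-arms itself every 10 seconds */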
  864. static void sh_eth_timer(unsigned long data)
  865. {
  866. struct net_device *ndev = (struct net_device *)data;
  867. struct sh_eth_private *mdp = netdev_priv(ndev);
  868. mod_timer(&mdp->timer, jiffies + (10 * HZ));
  869. }
  870. /* PHY state control function */
  871. static void sh_eth_adjust_link(struct net_device *ndev)
  872. {
  873. struct sh_eth_private *mdp = netdev_priv(ndev);
  874. struct phy_device *phydev = mdp->phydev;
  875. u32 ioaddr = ndev->base_addr;
  876. int new_state = 0;
  877. if (phydev->link != PHY_DOWN) {
  878. if (phydev->duplex != mdp->duplex) {
  879. new_state = 1;
  880. mdp->duplex = phydev->duplex;
  881. if (mdp->cd->set_duplex)
  882. mdp->cd->set_duplex(ndev);
  883. }
  884. if (phydev->speed != mdp->speed) {
  885. new_state = 1;
  886. mdp->speed = phydev->speed;
  887. if (mdp->cd->set_rate)
  888. mdp->cd->set_rate(ndev);
  889. }
  890. if (mdp->link == PHY_DOWN) {
  891. writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
  892. | ECMR_DM, ioaddr + ECMR);
  893. new_state = 1;
  894. mdp->link = phydev->link;
  895. }
  896. } else if (mdp->link) {
  897. new_state = 1;
  898. mdp->link = PHY_DOWN;
  899. mdp->speed = 0;
  900. mdp->duplex = -1;
  901. }
  902. if (new_state && netif_msg_link(mdp))
  903. phy_print_status(phydev);
  904. }
  905. /* PHY init function */
  906. static int sh_eth_phy_init(struct net_device *ndev)
  907. {
  908. struct sh_eth_private *mdp = netdev_priv(ndev);
  909. char phy_id[MII_BUS_ID_SIZE + 3];
  910. struct phy_device *phydev = NULL;
  911. snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
  912. mdp->mii_bus->id , mdp->phy_id);
  913. mdp->link = PHY_DOWN;
  914. mdp->speed = 0;
  915. mdp->duplex = -1;
  916. /* Try connect to PHY */
  917. phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
  918. 0, PHY_INTERFACE_MODE_MII);
  919. if (IS_ERR(phydev)) {
  920. dev_err(&ndev->dev, "phy_connect failed\n");
  921. return PTR_ERR(phydev);
  922. }
  923. dev_info(&ndev->dev, "attached phy %i to driver %s\n",
  924. phydev->addr, phydev->drv->name);
  925. mdp->phydev = phydev;
  926. return 0;
  927. }
  928. /* PHY control start function */
  929. static int sh_eth_phy_start(struct net_device *ndev)
  930. {
  931. struct sh_eth_private *mdp = netdev_priv(ndev);
  932. int ret;
  933. ret = sh_eth_phy_init(ndev);
  934. if (ret)
  935. return ret;
  936. /* reset phy - this also wakes it from PDOWN */
  937. phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
  938. phy_start(mdp->phydev);
  939. return 0;
  940. }
  941. static int sh_eth_get_settings(struct net_device *ndev,
  942. struct ethtool_cmd *ecmd)
  943. {
  944. struct sh_eth_private *mdp = netdev_priv(ndev);
  945. unsigned long flags;
  946. int ret;
  947. spin_lock_irqsave(&mdp->lock, flags);
  948. ret = phy_ethtool_gset(mdp->phydev, ecmd);
  949. spin_unlock_irqrestore(&mdp->lock, flags);
  950. return ret;
  951. }
  952. static int sh_eth_set_settings(struct net_device *ndev,
  953. struct ethtool_cmd *ecmd)
  954. {
  955. struct sh_eth_private *mdp = netdev_priv(ndev);
  956. unsigned long flags;
  957. int ret;
  958. u32 ioaddr = ndev->base_addr;
  959. spin_lock_irqsave(&mdp->lock, flags);
  960. /* disable tx and rx */
  961. sh_eth_rcv_snd_disable(ioaddr);
  962. ret = phy_ethtool_sset(mdp->phydev, ecmd);
  963. if (ret)
  964. goto error_exit;
  965. if (ecmd->duplex == DUPLEX_FULL)
  966. mdp->duplex = 1;
  967. else
  968. mdp->duplex = 0;
  969. if (mdp->cd->set_duplex)
  970. mdp->cd->set_duplex(ndev);
  971. error_exit:
  972. mdelay(1);
  973. /* enable tx and rx */
  974. sh_eth_rcv_snd_enable(ioaddr);
  975. spin_unlock_irqrestore(&mdp->lock, flags);
  976. return ret;
  977. }
  978. static int sh_eth_nway_reset(struct net_device *ndev)
  979. {
  980. struct sh_eth_private *mdp = netdev_priv(ndev);
  981. unsigned long flags;
  982. int ret;
  983. spin_lock_irqsave(&mdp->lock, flags);
  984. ret = phy_start_aneg(mdp->phydev);
  985. spin_unlock_irqrestore(&mdp->lock, flags);
  986. return ret;
  987. }
  988. static u32 sh_eth_get_msglevel(struct net_device *ndev)
  989. {
  990. struct sh_eth_private *mdp = netdev_priv(ndev);
  991. return mdp->msg_enable;
  992. }
  993. static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
  994. {
  995. struct sh_eth_private *mdp = netdev_priv(ndev);
  996. mdp->msg_enable = value;
  997. }
  998. static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
  999. "rx_current", "tx_current",
  1000. "rx_dirty", "tx_dirty",
  1001. };
  1002. #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
  1003. static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
  1004. {
  1005. switch (sset) {
  1006. case ETH_SS_STATS:
  1007. return SH_ETH_STATS_LEN;
  1008. default:
  1009. return -EOPNOTSUPP;
  1010. }
  1011. }
  1012. static void sh_eth_get_ethtool_stats(struct net_device *ndev,
  1013. struct ethtool_stats *stats, u64 *data)
  1014. {
  1015. struct sh_eth_private *mdp = netdev_priv(ndev);
  1016. int i = 0;
  1017. /* device-specific stats */
  1018. data[i++] = mdp->cur_rx;
  1019. data[i++] = mdp->cur_tx;
  1020. data[i++] = mdp->dirty_rx;
  1021. data[i++] = mdp->dirty_tx;
  1022. }
  1023. static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
  1024. {
  1025. switch (stringset) {
  1026. case ETH_SS_STATS:
  1027. memcpy(data, *sh_eth_gstrings_stats,
  1028. sizeof(sh_eth_gstrings_stats));
  1029. break;
  1030. }
  1031. }
  1032. static struct ethtool_ops sh_eth_ethtool_ops = {
  1033. .get_settings = sh_eth_get_settings,
  1034. .set_settings = sh_eth_set_settings,
  1035. .nway_reset = sh_eth_nway_reset,
  1036. .get_msglevel = sh_eth_get_msglevel,
  1037. .set_msglevel = sh_eth_set_msglevel,
  1038. .get_link = ethtool_op_get_link,
  1039. .get_strings = sh_eth_get_strings,
  1040. .get_ethtool_stats = sh_eth_get_ethtool_stats,
  1041. .get_sset_count = sh_eth_get_sset_count,
  1042. };
  1043. /* network device open function */
  1044. static int sh_eth_open(struct net_device *ndev)
  1045. {
  1046. int ret = 0;
  1047. struct sh_eth_private *mdp = netdev_priv(ndev);
  1048. pm_runtime_get_sync(&mdp->pdev->dev);
  1049. ret = request_irq(ndev->irq, sh_eth_interrupt,
  1050. #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
  1051. defined(CONFIG_CPU_SUBTYPE_SH7764) || \
  1052. defined(CONFIG_CPU_SUBTYPE_SH7757)
  1053. IRQF_SHARED,
  1054. #else
  1055. 0,
  1056. #endif
  1057. ndev->name, ndev);
  1058. if (ret) {
  1059. dev_err(&ndev->dev, "Can not assign IRQ number\n");
  1060. return ret;
  1061. }
  1062. /* Descriptor set */
  1063. ret = sh_eth_ring_init(ndev);
  1064. if (ret)
  1065. goto out_free_irq;
  1066. /* device init */
  1067. ret = sh_eth_dev_init(ndev);
  1068. if (ret)
  1069. goto out_free_irq;
  1070. /* PHY control start*/
  1071. ret = sh_eth_phy_start(ndev);
  1072. if (ret)
  1073. goto out_free_irq;
  1074. /* Set the timer to check for link beat. */
  1075. init_timer(&mdp->timer);
  1076. mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
  1077. setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
  1078. return ret;
  1079. out_free_irq:
  1080. free_irq(ndev->irq, ndev);
  1081. pm_runtime_put_sync(&mdp->pdev->dev);
  1082. return ret;
  1083. }
  1084. /* Timeout function */
  1085. static void sh_eth_tx_timeout(struct net_device *ndev)
  1086. {
  1087. struct sh_eth_private *mdp = netdev_priv(ndev);
  1088. u32 ioaddr = ndev->base_addr;
  1089. struct sh_eth_rxdesc *rxdesc;
  1090. int i;
  1091. netif_stop_queue(ndev);
  1092. if (netif_msg_timer(mdp))
  1093. dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
  1094. " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
  1095. /* tx_errors count up */
  1096. mdp->stats.tx_errors++;
  1097. /* timer off */
  1098. del_timer_sync(&mdp->timer);
  1099. /* Free all the skbuffs in the Rx queue. */
  1100. for (i = 0; i < RX_RING_SIZE; i++) {
  1101. rxdesc = &mdp->rx_ring[i];
  1102. rxdesc->status = 0;
  1103. rxdesc->addr = 0xBADF00D0;
  1104. if (mdp->rx_skbuff[i])
  1105. dev_kfree_skb(mdp->rx_skbuff[i]);
  1106. mdp->rx_skbuff[i] = NULL;
  1107. }
  1108. for (i = 0; i < TX_RING_SIZE; i++) {
  1109. if (mdp->tx_skbuff[i])
  1110. dev_kfree_skb(mdp->tx_skbuff[i]);
  1111. mdp->tx_skbuff[i] = NULL;
  1112. }
  1113. /* device init */
  1114. sh_eth_dev_init(ndev);
  1115. /* timer on */
  1116. mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
  1117. add_timer(&mdp->timer);
  1118. }
  1119. /* Packet transmit function */
  1120. static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  1121. {
  1122. struct sh_eth_private *mdp = netdev_priv(ndev);
  1123. struct sh_eth_txdesc *txdesc;
  1124. u32 entry;
  1125. unsigned long flags;
  1126. spin_lock_irqsave(&mdp->lock, flags);
  1127. if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
  1128. if (!sh_eth_txfree(ndev)) {
  1129. if (netif_msg_tx_queued(mdp))
  1130. dev_warn(&ndev->dev, "TxFD exhausted.\n");
  1131. netif_stop_queue(ndev);
  1132. spin_unlock_irqrestore(&mdp->lock, flags);
  1133. return NETDEV_TX_BUSY;
  1134. }
  1135. }
  1136. spin_unlock_irqrestore(&mdp->lock, flags);
  1137. entry = mdp->cur_tx % TX_RING_SIZE;
  1138. mdp->tx_skbuff[entry] = skb;
  1139. txdesc = &mdp->tx_ring[entry];
  1140. txdesc->addr = virt_to_phys(skb->data);
  1141. /* soft swap. */
  1142. if (!mdp->cd->hw_swap)
  1143. sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
  1144. skb->len + 2);
  1145. /* write back */
  1146. __flush_purge_region(skb->data, skb->len);
  1147. if (skb->len < ETHERSMALL)
  1148. txdesc->buffer_length = ETHERSMALL;
  1149. else
  1150. txdesc->buffer_length = skb->len;
  1151. if (entry >= TX_RING_SIZE - 1)
  1152. txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
  1153. else
  1154. txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
  1155. mdp->cur_tx++;
  1156. if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
  1157. writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
  1158. return NETDEV_TX_OK;
  1159. }
  1160. /* device close function */
  1161. static int sh_eth_close(struct net_device *ndev)
  1162. {
  1163. struct sh_eth_private *mdp = netdev_priv(ndev);
  1164. u32 ioaddr = ndev->base_addr;
  1165. int ringsize;
  1166. netif_stop_queue(ndev);
  1167. /* Disable interrupts by clearing the interrupt mask. */
  1168. writel(0x0000, ioaddr + EESIPR);
  1169. /* Stop the chip's Tx and Rx processes. */
  1170. writel(0, ioaddr + EDTRR);
  1171. writel(0, ioaddr + EDRRR);
  1172. /* PHY Disconnect */
  1173. if (mdp->phydev) {
  1174. phy_stop(mdp->phydev);
  1175. phy_disconnect(mdp->phydev);
  1176. }
  1177. free_irq(ndev->irq, ndev);
  1178. del_timer_sync(&mdp->timer);
  1179. /* Free all the skbuffs in the Rx queue. */
  1180. sh_eth_ring_free(ndev);
  1181. /* free DMA buffer */
  1182. ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
  1183. dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
  1184. /* free DMA buffer */
  1185. ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
  1186. dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
  1187. pm_runtime_put_sync(&mdp->pdev->dev);
  1188. return 0;
  1189. }
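/* Fold the MAC's hardware drop/collision counters (write-to-clear registers)
 * into net_device_stats */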
  1190. static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
  1191. {
  1192. struct sh_eth_private *mdp = netdev_priv(ndev);
  1193. u32 ioaddr = ndev->base_addr;
  1194. pm_runtime_get_sync(&mdp->pdev->dev);
  1195. mdp->stats.tx_dropped += readl(ioaddr + TROCR);
  1196. writel(0, ioaddr + TROCR); /* (write clear) */
  1197. mdp->stats.collisions += readl(ioaddr + CDCR);
  1198. writel(0, ioaddr + CDCR); /* (write clear) */
  1199. mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
  1200. writel(0, ioaddr + LCCR); /* (write clear) */
  1201. #if defined(CONFIG_CPU_SUBTYPE_SH7763)
  1202. mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
  1203. writel(0, ioaddr + CERCR); /* (write clear) */
  1204. mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
  1205. writel(0, ioaddr + CEECR); /* (write clear) */
  1206. #else
  1207. mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
  1208. writel(0, ioaddr + CNDCR); /* (write clear) */
  1209. #endif
  1210. pm_runtime_put_sync(&mdp->pdev->dev);
  1211. return &mdp->stats;
  1212. }
  1213. /* ioctl to device function */
  1214. static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
  1215. int cmd)
  1216. {
  1217. struct sh_eth_private *mdp = netdev_priv(ndev);
  1218. struct phy_device *phydev = mdp->phydev;
  1219. if (!netif_running(ndev))
  1220. return -EINVAL;
  1221. if (!phydev)
  1222. return -ENODEV;
  1223. return phy_mii_ioctl(phydev, rq, cmd);
  1224. }
  1225. #if defined(SH_ETH_HAS_TSU)
  1226. /* Multicast reception directions set */
  1227. static void sh_eth_set_multicast_list(struct net_device *ndev)
  1228. {
  1229. u32 ioaddr = ndev->base_addr;
  1230. if (ndev->flags & IFF_PROMISC) {
  1231. /* Set promiscuous. */
  1232. writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
  1233. ioaddr + ECMR);
  1234. } else {
  1235. /* Normal, unicast/broadcast-only mode. */
  1236. writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
  1237. ioaddr + ECMR);
  1238. }
  1239. }
  1240. /* SuperH's TSU register init function */
  1241. static void sh_eth_tsu_init(u32 ioaddr)
  1242. {
  1243. writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
  1244. writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
  1245. writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
  1246. writel(0xc, ioaddr + TSU_BSYSL0);
  1247. writel(0xc, ioaddr + TSU_BSYSL1);
  1248. writel(0, ioaddr + TSU_PRISL0);
  1249. writel(0, ioaddr + TSU_PRISL1);
  1250. writel(0, ioaddr + TSU_FWSL0);
  1251. writel(0, ioaddr + TSU_FWSL1);
  1252. writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
  1253. #if defined(CONFIG_CPU_SUBTYPE_SH7763)
  1254. writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
  1255. writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
  1256. #else
  1257. writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
  1258. writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
  1259. #endif
  1260. writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
  1261. writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
  1262. writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
  1263. writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
  1264. writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
  1265. writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
  1266. writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
  1267. }
  1268. #endif /* SH_ETH_HAS_TSU */
  1269. /* MDIO bus release function */
  1270. static int sh_mdio_release(struct net_device *ndev)
  1271. {
  1272. struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
  1273. /* unregister mdio bus */
  1274. mdiobus_unregister(bus);
  1275. /* remove mdio bus info from net_device */
  1276. dev_set_drvdata(&ndev->dev, NULL);
  1277. /* free interrupts memory */
  1278. kfree(bus->irq);
  1279. /* free bitbang info */
  1280. free_mdio_bitbang(bus);
  1281. return 0;
  1282. }
  1283. /* MDIO bus init function */
  1284. static int sh_mdio_init(struct net_device *ndev, int id)
  1285. {
  1286. int ret, i;
  1287. struct bb_info *bitbang;
  1288. struct sh_eth_private *mdp = netdev_priv(ndev);
  1289. /* create bit control struct for PHY */
  1290. bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
  1291. if (!bitbang) {
  1292. ret = -ENOMEM;
  1293. goto out;
  1294. }
  1295. /* bitbang init */
  1296. bitbang->addr = ndev->base_addr + PIR;
  1297. bitbang->mdi_msk = 0x08;
  1298. bitbang->mdo_msk = 0x04;
  1299. bitbang->mmd_msk = 0x02;/* MMD */
  1300. bitbang->mdc_msk = 0x01;
  1301. bitbang->ctrl.ops = &bb_ops;
  1302. /* MII controller setting */
  1303. mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
  1304. if (!mdp->mii_bus) {
  1305. ret = -ENOMEM;
  1306. goto out_free_bitbang;
  1307. }
  1308. /* Hook up MII support for ethtool */
  1309. mdp->mii_bus->name = "sh_mii";
  1310. mdp->mii_bus->parent = &ndev->dev;
  1311. snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
  1312. /* PHY IRQ */
  1313. mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
  1314. if (!mdp->mii_bus->irq) {
  1315. ret = -ENOMEM;
  1316. goto out_free_bus;
  1317. }
  1318. for (i = 0; i < PHY_MAX_ADDR; i++)
  1319. mdp->mii_bus->irq[i] = PHY_POLL;
  1320. /* register mdio bus */
  1321. ret = mdiobus_register(mdp->mii_bus);
  1322. if (ret)
  1323. goto out_free_irq;
  1324. dev_set_drvdata(&ndev->dev, mdp->mii_bus);
  1325. return 0;
  1326. out_free_irq:
  1327. kfree(mdp->mii_bus->irq);
  1328. out_free_bus:
  1329. free_mdio_bitbang(mdp->mii_bus);
  1330. out_free_bitbang:
  1331. kfree(bitbang);
  1332. out:
  1333. return ret;
  1334. }
  1335. static const struct net_device_ops sh_eth_netdev_ops = {
  1336. .ndo_open = sh_eth_open,
  1337. .ndo_stop = sh_eth_close,
  1338. .ndo_start_xmit = sh_eth_start_xmit,
  1339. .ndo_get_stats = sh_eth_get_stats,
  1340. #if defined(SH_ETH_HAS_TSU)
  1341. .ndo_set_multicast_list = sh_eth_set_multicast_list,
  1342. #endif
  1343. .ndo_tx_timeout = sh_eth_tx_timeout,
  1344. .ndo_do_ioctl = sh_eth_do_ioctl,
  1345. .ndo_validate_addr = eth_validate_addr,
  1346. .ndo_set_mac_address = eth_mac_addr,
  1347. .ndo_change_mtu = eth_change_mtu,
  1348. };
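/* Platform driver probe: map the register resource, set up the net_device and
 * per-SoC cpu_data, then register the netdev and the MDIO bus */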
  1349. static int sh_eth_drv_probe(struct platform_device *pdev)
  1350. {
  1351. int ret, devno = 0;
  1352. struct resource *res;
  1353. struct net_device *ndev = NULL;
  1354. struct sh_eth_private *mdp;
  1355. struct sh_eth_plat_data *pd;
  1356. /* get base addr */
  1357. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1358. if (unlikely(res == NULL)) {
  1359. dev_err(&pdev->dev, "invalid resource\n");
  1360. ret = -EINVAL;
  1361. goto out;
  1362. }
  1363. ndev = alloc_etherdev(sizeof(struct sh_eth_private));
  1364. if (!ndev) {
  1365. dev_err(&pdev->dev, "Could not allocate device.\n");
  1366. ret = -ENOMEM;
  1367. goto out;
  1368. }
  1369. /* The sh Ether-specific entries in the device structure. */
  1370. ndev->base_addr = res->start;
  1371. devno = pdev->id;
  1372. if (devno < 0)
  1373. devno = 0;
  1374. ndev->dma = -1;
  1375. ret = platform_get_irq(pdev, 0);
  1376. if (ret < 0) {
  1377. ret = -ENODEV;
  1378. goto out_release;
  1379. }
  1380. ndev->irq = ret;
  1381. SET_NETDEV_DEV(ndev, &pdev->dev);
  1382. /* Fill in the fields of the device structure with ethernet values. */
  1383. ether_setup(ndev);
  1384. mdp = netdev_priv(ndev);
  1385. spin_lock_init(&mdp->lock);
  1386. mdp->pdev = pdev;
  1387. pm_runtime_enable(&pdev->dev);
  1388. pm_runtime_resume(&pdev->dev);
  1389. pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
  1390. /* get PHY ID */
  1391. mdp->phy_id = pd->phy;
  1392. /* EDMAC endian */
  1393. mdp->edmac_endian = pd->edmac_endian;
  1394. mdp->no_ether_link = pd->no_ether_link;
  1395. mdp->ether_link_active_low = pd->ether_link_active_low;
  1396. /* set cpu data */
  1397. mdp->cd = &sh_eth_my_cpu_data;
  1398. sh_eth_set_default_cpu_data(mdp->cd);
  1399. /* set function */
  1400. ndev->netdev_ops = &sh_eth_netdev_ops;
  1401. SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
  1402. ndev->watchdog_timeo = TX_TIMEOUT;
  1403. /* debug message level */
  1404. mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
  1405. mdp->post_rx = POST_RX >> (devno << 1);
  1406. mdp->post_fw = POST_FW >> (devno << 1);
  1407. /* read and set MAC address */
  1408. read_mac_address(ndev, pd->mac_addr);
  1409. /* First device only init */
  1410. if (!devno) {
  1411. if (mdp->cd->chip_reset)
  1412. mdp->cd->chip_reset(ndev);
  1413. #if defined(SH_ETH_HAS_TSU)
  1414. /* TSU init (Init only)*/
  1415. sh_eth_tsu_init(SH_TSU_ADDR);
  1416. #endif
  1417. }
  1418. /* network device register */
  1419. ret = register_netdev(ndev);
  1420. if (ret)
  1421. goto out_release;
  1422. /* mdio bus init */
  1423. ret = sh_mdio_init(ndev, pdev->id);
  1424. if (ret)
  1425. goto out_unregister;
  1426. /* print device information */
  1427. pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
  1428. (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
  1429. platform_set_drvdata(pdev, ndev);
  1430. return ret;
  1431. out_unregister:
  1432. unregister_netdev(ndev);
  1433. out_release:
  1434. /* net_dev free */
  1435. if (ndev)
  1436. free_netdev(ndev);
  1437. out:
  1438. return ret;
  1439. }
  1440. static int sh_eth_drv_remove(struct platform_device *pdev)
  1441. {
  1442. struct net_device *ndev = platform_get_drvdata(pdev);
  1443. sh_mdio_release(ndev);
  1444. unregister_netdev(ndev);
  1445. pm_runtime_disable(&pdev->dev);
  1446. free_netdev(ndev);
  1447. platform_set_drvdata(pdev, NULL);
  1448. return 0;
  1449. }
  1450. static int sh_eth_runtime_nop(struct device *dev)
  1451. {
  1452. /*
  1453. * Runtime PM callback shared between ->runtime_suspend()
  1454. * and ->runtime_resume(). Simply returns success.
  1455. *
  1456. * This driver re-initializes all registers after
  1457. * pm_runtime_get_sync() anyway so there is no need
  1458. * to save and restore registers here.
  1459. */
  1460. return 0;
  1461. }
  1462. static struct dev_pm_ops sh_eth_dev_pm_ops = {
  1463. .runtime_suspend = sh_eth_runtime_nop,
  1464. .runtime_resume = sh_eth_runtime_nop,
  1465. };
  1466. static struct platform_driver sh_eth_driver = {
  1467. .probe = sh_eth_drv_probe,
  1468. .remove = sh_eth_drv_remove,
  1469. .driver = {
  1470. .name = CARDNAME,
  1471. .pm = &sh_eth_dev_pm_ops,
  1472. },
  1473. };
  1474. static int __init sh_eth_init(void)
  1475. {
  1476. return platform_driver_register(&sh_eth_driver);
  1477. }
  1478. static void __exit sh_eth_cleanup(void)
  1479. {
  1480. platform_driver_unregister(&sh_eth_driver);
  1481. }
  1482. module_init(sh_eth_init);
  1483. module_exit(sh_eth_cleanup);
  1484. MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
  1485. MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
  1486. MODULE_LICENSE("GPL v2");