ks8851_mll.c

/**
 * drivers/net/ks8851_mll.c
 * Copyright (c) 2009 Micrel Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/**
 * Supports:
 * KS8851 16bit MLL chip from Micrel Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

#define DRV_NAME "ks8851_mll"
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };

#define MAX_RECV_FRAMES 32
#define MAX_BUF_SIZE 2048
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000

#define KS_CCR 0x08
#define CCR_EEPROM (1 << 9)
#define CCR_SPI (1 << 8)
#define CCR_8BIT (1 << 7)
#define CCR_16BIT (1 << 6)
#define CCR_32BIT (1 << 5)
#define CCR_SHARED (1 << 4)
#define CCR_32PIN (1 << 0)

/* MAC address registers */
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14

#define KS_OBCR 0x20
#define OBCR_ODS_16MA (1 << 6)

#define KS_EEPCR 0x22
#define EEPCR_EESA (1 << 4)
#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)

#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
#define MBIR_RXMBF (1 << 4)
#define MBIR_RXMBFA (1 << 3)

#define KS_GRR 0x26
#define GRR_QMU (1 << 1)
#define GRR_GSR (1 << 0)

#define KS_WFCR 0x2A
#define WFCR_MPRXE (1 << 7)
#define WFCR_WF3E (1 << 3)
#define WFCR_WF2E (1 << 2)
#define WFCR_WF1E (1 << 1)
#define WFCR_WF0E (1 << 0)

#define KS_WF0CRC0 0x30
#define KS_WF0CRC1 0x32
#define KS_WF0BM0 0x34
#define KS_WF0BM1 0x36
#define KS_WF0BM2 0x38
#define KS_WF0BM3 0x3A

#define KS_WF1CRC0 0x40
#define KS_WF1CRC1 0x42
#define KS_WF1BM0 0x44
#define KS_WF1BM1 0x46
#define KS_WF1BM2 0x48
#define KS_WF1BM3 0x4A

#define KS_WF2CRC0 0x50
#define KS_WF2CRC1 0x52
#define KS_WF2BM0 0x54
#define KS_WF2BM1 0x56
#define KS_WF2BM2 0x58
#define KS_WF2BM3 0x5A

#define KS_WF3CRC0 0x60
#define KS_WF3CRC1 0x62
#define KS_WF3BM0 0x64
#define KS_WF3BM1 0x66
#define KS_WF3BM2 0x68
#define KS_WF3BM3 0x6A

#define KS_TXCR 0x70
#define TXCR_TCGICMP (1 << 8)
#define TXCR_TCGUDP (1 << 7)
#define TXCR_TCGTCP (1 << 6)
#define TXCR_TCGIP (1 << 5)
#define TXCR_FTXQ (1 << 4)
#define TXCR_TXFCE (1 << 3)
#define TXCR_TXPE (1 << 2)
#define TXCR_TXCRC (1 << 1)
#define TXCR_TXE (1 << 0)

#define KS_TXSR 0x72
#define TXSR_TXLC (1 << 13)
#define TXSR_TXMC (1 << 12)
#define TXSR_TXFID_MASK (0x3f << 0)
#define TXSR_TXFID_SHIFT (0)
#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)

#define KS_RXCR1 0x74
#define RXCR1_FRXQ (1 << 15)
#define RXCR1_RXUDPFCC (1 << 14)
#define RXCR1_RXTCPFCC (1 << 13)
#define RXCR1_RXIPFCC (1 << 12)
#define RXCR1_RXPAFMA (1 << 11)
#define RXCR1_RXFCE (1 << 10)
#define RXCR1_RXEFE (1 << 9)
#define RXCR1_RXMAFMA (1 << 8)
#define RXCR1_RXBE (1 << 7)
#define RXCR1_RXME (1 << 6)
#define RXCR1_RXUE (1 << 5)
#define RXCR1_RXAE (1 << 4)
#define RXCR1_RXINVF (1 << 1)
#define RXCR1_RXE (1 << 0)
#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
                           RXCR1_RXMAFMA | RXCR1_RXPAFMA)

#define KS_RXCR2 0x76
#define RXCR2_SRDBL_MASK (0x7 << 5)
#define RXCR2_SRDBL_SHIFT (5)
#define RXCR2_SRDBL_4B (0x0 << 5)
#define RXCR2_SRDBL_8B (0x1 << 5)
#define RXCR2_SRDBL_16B (0x2 << 5)
#define RXCR2_SRDBL_32B (0x3 << 5)
/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
#define RXCR2_RXICMPFCC (1 << 1)
#define RXCR2_RXSAF (1 << 0)

#define KS_TXMIR 0x78

#define KS_RXFHSR 0x7C
#define RXFSHR_RXFV (1 << 15)
#define RXFSHR_RXICMPFCS (1 << 13)
#define RXFSHR_RXIPFCS (1 << 12)
#define RXFSHR_RXTCPFCS (1 << 11)
#define RXFSHR_RXUDPFCS (1 << 10)
#define RXFSHR_RXBF (1 << 7)
#define RXFSHR_RXMF (1 << 6)
#define RXFSHR_RXUF (1 << 5)
#define RXFSHR_RXMR (1 << 4)
#define RXFSHR_RXFT (1 << 3)
#define RXFSHR_RXFTL (1 << 2)
#define RXFSHR_RXRF (1 << 1)
#define RXFSHR_RXCE (1 << 0)
#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
                    RXFSHR_RXFTL | RXFSHR_RXMR |\
                    RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
                    RXFSHR_RXTCPFCS)

#define KS_RXFHBCR 0x7E
#define RXFHBCR_CNT_MASK 0x0FFF

#define KS_TXQCR 0x80
#define TXQCR_AETFE (1 << 2)
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)

#define KS_RXQCR 0x82
#define RXQCR_RXDTTS (1 << 12)
#define RXQCR_RXDBCTS (1 << 11)
#define RXQCR_RXFCTS (1 << 10)
#define RXQCR_RXIPHTOE (1 << 9)
#define RXQCR_RXDTTE (1 << 7)
#define RXQCR_RXDBCTE (1 << 6)
#define RXQCR_RXFCTE (1 << 5)
#define RXQCR_ADRFE (1 << 4)
#define RXQCR_SDA (1 << 3)
#define RXQCR_RRXEF (1 << 0)
#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)

#define KS_TXFDPR 0x84
#define TXFDPR_TXFPAI (1 << 14)
#define TXFDPR_TXFP_MASK (0x7ff << 0)
#define TXFDPR_TXFP_SHIFT (0)

#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)

#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E

#define KS_IER 0x90
#define KS_ISR 0x92
#define IRQ_LCI (1 << 15)
#define IRQ_TXI (1 << 14)
#define IRQ_RXI (1 << 13)
#define IRQ_RXOI (1 << 11)
#define IRQ_TXPSI (1 << 9)
#define IRQ_RXPSI (1 << 8)
#define IRQ_TXSAI (1 << 6)
#define IRQ_RXWFDI (1 << 5)
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
#define IRQ_SPIBEI (1 << 1)
#define IRQ_DEDI (1 << 0)

#define KS_RXFCTR 0x9C
#define RXFCTR_THRESHOLD_MASK 0x00FF

#define KS_RXFC 0x9D
#define RXFCTR_RXFC_MASK (0xff << 8)
#define RXFCTR_RXFC_SHIFT (8)
#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
#define RXFCTR_RXFCT_MASK (0xff << 0)
#define RXFCTR_RXFCT_SHIFT (0)

#define KS_TXNTFSR 0x9E

#define KS_MAHTR0 0xA0
#define KS_MAHTR1 0xA2
#define KS_MAHTR2 0xA4
#define KS_MAHTR3 0xA6

#define KS_FCLWR 0xB0
#define KS_FCHWR 0xB2
#define KS_FCOWR 0xB4

#define KS_CIDER 0xC0
#define CIDER_ID 0x8870
#define CIDER_REV_MASK (0x7 << 1)
#define CIDER_REV_SHIFT (1)
#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)

#define KS_CGCR 0xC6

#define KS_IACR 0xC8
#define IACR_RDEN (1 << 12)
#define IACR_TSEL_MASK (0x3 << 10)
#define IACR_TSEL_SHIFT (10)
#define IACR_TSEL_MIB (0x3 << 10)
#define IACR_ADDR_MASK (0x1f << 0)
#define IACR_ADDR_SHIFT (0)

#define KS_IADLR 0xD0
#define KS_IAHDR 0xD2

#define KS_PMECR 0xD4
#define PMECR_PME_DELAY (1 << 14)
#define PMECR_PME_POL (1 << 12)
#define PMECR_WOL_WAKEUP (1 << 11)
#define PMECR_WOL_MAGICPKT (1 << 10)
#define PMECR_WOL_LINKUP (1 << 9)
#define PMECR_WOL_ENERGY (1 << 8)
#define PMECR_AUTO_WAKE_EN (1 << 7)
#define PMECR_WAKEUP_NORMAL (1 << 6)
#define PMECR_WKEVT_MASK (0xf << 2)
#define PMECR_WKEVT_SHIFT (2)
#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
#define PMECR_WKEVT_ENERGY (0x1 << 2)
#define PMECR_WKEVT_LINK (0x2 << 2)
#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
#define PMECR_WKEVT_FRAME (0x8 << 2)
#define PMECR_PM_MASK (0x3 << 0)
#define PMECR_PM_SHIFT (0)
#define PMECR_PM_NORMAL (0x0 << 0)
#define PMECR_PM_ENERGY (0x1 << 0)
#define PMECR_PM_SOFTDOWN (0x2 << 0)
#define PMECR_PM_POWERSAVE (0x3 << 0)

/* Standard MII PHY data */
#define KS_P1MBCR 0xE4
#define P1MBCR_FORCE_FDX (1 << 8)

#define KS_P1MBSR 0xE6
#define P1MBSR_AN_COMPLETE (1 << 5)
#define P1MBSR_AN_CAPABLE (1 << 3)
#define P1MBSR_LINK_UP (1 << 2)

#define KS_PHY1ILR 0xE8
#define KS_PHY1IHR 0xEA
#define KS_P1ANAR 0xEC
#define KS_P1ANLPR 0xEE

#define KS_P1SCLMD 0xF4
#define P1SCLMD_LEDOFF (1 << 15)
#define P1SCLMD_TXIDS (1 << 14)
#define P1SCLMD_RESTARTAN (1 << 13)
#define P1SCLMD_DISAUTOMDIX (1 << 10)
#define P1SCLMD_FORCEMDIX (1 << 9)
#define P1SCLMD_AUTONEGEN (1 << 7)
#define P1SCLMD_FORCE100 (1 << 6)
#define P1SCLMD_FORCEFDX (1 << 5)
#define P1SCLMD_ADV_FLOW (1 << 4)
#define P1SCLMD_ADV_100BT_FDX (1 << 3)
#define P1SCLMD_ADV_100BT_HDX (1 << 2)
#define P1SCLMD_ADV_10BT_FDX (1 << 1)
#define P1SCLMD_ADV_10BT_HDX (1 << 0)

#define KS_P1CR 0xF6
#define P1CR_HP_MDIX (1 << 15)
#define P1CR_REV_POL (1 << 13)
#define P1CR_OP_100M (1 << 10)
#define P1CR_OP_FDX (1 << 9)
#define P1CR_OP_MDI (1 << 7)
#define P1CR_AN_DONE (1 << 6)
#define P1CR_LINK_GOOD (1 << 5)
#define P1CR_PNTR_FLOW (1 << 4)
#define P1CR_PNTR_100BT_FDX (1 << 3)
#define P1CR_PNTR_100BT_HDX (1 << 2)
#define P1CR_PNTR_10BT_FDX (1 << 1)
#define P1CR_PNTR_10BT_HDX (1 << 0)

/* TX Frame control */
#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)

#define KS_P1SR 0xF8
#define P1SR_HP_MDIX (1 << 15)
#define P1SR_REV_POL (1 << 13)
#define P1SR_OP_100M (1 << 10)
#define P1SR_OP_FDX (1 << 9)
#define P1SR_OP_MDI (1 << 7)
#define P1SR_AN_DONE (1 << 6)
#define P1SR_LINK_GOOD (1 << 5)
#define P1SR_PNTR_FLOW (1 << 4)
#define P1SR_PNTR_100BT_FDX (1 << 3)
#define P1SR_PNTR_100BT_HDX (1 << 2)
#define P1SR_PNTR_10BT_FDX (1 << 1)
#define P1SR_PNTR_10BT_HDX (1 << 0)

#define ENUM_BUS_NONE 0
#define ENUM_BUS_8BIT 1
#define ENUM_BUS_16BIT 2
#define ENUM_BUS_32BIT 3

#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8
#define MAC_ADDR_LEN 6
/**
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.
 */
union ks_tx_hdr {
        u8 txb[4];
        __le16 txw[2];
};

/**
 * struct ks_net - KS8851 driver private data
 * @net_device : The network device we're bound to
 * @hw_addr : start address of data register.
 * @hw_addr_cmd : start address of command register.
 * @txh : temporary buffer to save status/length.
 * @lock : Lock to ensure that the device is not accessed when busy.
 * @pdev : Pointer to platform device.
 * @mii : The MII state information for the mii calls.
 * @frame_head_info : frame header information for multi-pkt rx.
 * @statelock : Lock on this structure for tx list.
 * @msg_enable : The message flags controlling driver output (see ethtool).
 * @frame_cnt : number of frames received.
 * @bus_width : i/o bus width.
 * @irq : irq number assigned to this device.
 * @rc_rxqcr : Cached copy of KS_RXQCR.
 * @rc_txcr : Cached copy of KS_TXCR.
 * @rc_ier : Cached copy of KS_IER.
 * @sharedbus : Multiplexed (addr and data bus) mode indicator.
 * @cmd_reg_cache : command register cached.
 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
 * @promiscuous : promiscuous mode indicator.
 * @all_mcast : all-multicast mode indicator.
 * @mcast_lst_size : size of multicast list.
 * @mcast_lst : multicast list.
 * @mcast_bits : multicast hash-table bits enabled.
 * @mac_addr : MAC address assigned to this device.
 * @fid : frame id.
 * @extra_byte : number of extra bytes prepended to an rx pkt.
 * @enabled : indicator this device works.
 *
 * The @lock ensures that the chip is protected when certain operations are
 * in progress. When the read or write packet transfer is in progress, most
 * of the chip registers are not accessible until the transfer is finished
 * and the DMA has been de-asserted.
 *
 * The @statelock is used to protect information in the structure which may
 * need to be accessed via several sources, such as the network driver layer
 * or one of the work queues.
 *
 */
/* Receive multiplex framer header info */
struct type_frame_head {
        u16 sts;        /* Frame status */
        u16 len;        /* Byte count */
};

struct ks_net {
        struct net_device *netdev;
        void __iomem *hw_addr;
        void __iomem *hw_addr_cmd;
        union ks_tx_hdr txh ____cacheline_aligned;
        struct mutex lock; /* protects chip register access */
        struct platform_device *pdev;
        struct mii_if_info mii;
        struct type_frame_head *frame_head_info;
        spinlock_t statelock;
        u32 msg_enable;
        u32 frame_cnt;
        int bus_width;
        int irq;
        u16 rc_rxqcr;
        u16 rc_txcr;
        u16 rc_ier;
        u16 sharedbus;
        u16 cmd_reg_cache;
        u16 cmd_reg_cache_int;
        u16 promiscuous;
        u16 all_mcast;
        u16 mcast_lst_size;
        u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
        u8 mcast_bits[HW_MCAST_SIZE];
        u8 mac_addr[6];
        u8 fid;
        u8 extra_byte;
        u8 enabled;
};

static int msg_enable;

#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)

#define BE3 0x8000 /* Byte Enable 3 */
#define BE2 0x4000 /* Byte Enable 2 */
#define BE1 0x2000 /* Byte Enable 1 */
#define BE0 0x1000 /* Byte Enable 0 */
/**
 * register read/write calls.
 *
 * All these calls issue transactions to access the chip's registers. They
 * all require that the necessary lock is held to prevent accesses when the
 * chip is busy transferring packet data (RX/TX FIFO accesses).
 */

/**
 * ks_rdreg8 - read 8 bit register from device
 * @ks : The chip information
 * @offset: The register address
 *
 * Read an 8bit register from the chip, returning the result
 */
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
        u16 data;
        u8 shift_bit = offset & 0x03;
        u8 shift_data = (offset & 1) << 3;

        ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        data = ioread16(ks->hw_addr);
        return (u8)(data >> shift_data);
}

/**
 * ks_rdreg16 - read 16 bit register from device
 * @ks : The chip information
 * @offset: The register address
 *
 * Read a 16bit register from the chip, returning the result
 */
static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
        ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        return ioread16(ks->hw_addr);
}

/**
 * ks_wrreg8 - write 8bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 */
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
        u8 shift_bit = (offset & 0x03);
        u16 value_write = (u16)(value << ((offset & 1) << 3));

        ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        iowrite16(value_write, ks->hw_addr);
}

/**
 * ks_wrreg16 - write 16bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 */
static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
        ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        iowrite16(value, ks->hw_addr);
}
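
/*
 * Worked example of the command encoding implemented above (illustration
 * only, derived from the code rather than the datasheet): a 16-bit read of
 * KS_CIDER (offset 0xC0) first writes the command word
 * 0xC0 | ((BE1 | BE0) << (0xC0 & 0x02)) = 0x30C0 to the command window and
 * then reads the data window; for an odd word such as KS_MARM (0x12) the
 * byte enables shift up by two, giving a command word of 0xC012.
 */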
/**
 * ks_inblk - read a block of data from the QMU. This is called after the
 *	pseudo-DMA mode is enabled.
 * @ks: The chip state
 * @wptr: buffer address to save data
 * @len: length in bytes to read
 *
 */
static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
{
        len >>= 1;
        while (len--)
                *wptr++ = (u16)ioread16(ks->hw_addr);
}

/**
 * ks_outblk - write data to the QMU. This is called after the pseudo-DMA
 *	mode is enabled.
 * @ks: The chip information
 * @wptr: buffer address
 * @len: length in bytes to write
 *
 */
static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
{
        len >>= 1;
        while (len--)
                iowrite16(*wptr++, ks->hw_addr);
}

/**
 * ks_tx_fifo_space - return the available hardware buffer size.
 * @ks: The chip information
 *
 */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
        return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}

/**
 * ks_save_cmd_reg - save the command register from the cache.
 * @ks: The chip information
 *
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
        /* The KS8851 MLL has a bug reading back the command register,
         * so rely on software to save its content.
         */
        ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}

/**
 * ks_restore_cmd_reg - restore the command register from the cache and
 *	write it to the hardware register.
 * @ks: The chip information
 *
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
        ks->cmd_reg_cache = ks->cmd_reg_cache_int;
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
/**
 * ks_set_powermode - set power mode of the device
 * @ks: The chip information
 * @pwrmode: The power mode value to write to KS_PMECR.
 *
 * Change the power mode of the chip.
 */
static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
        unsigned pmecr;

        if (netif_msg_hw(ks))
                ks_dbg(ks, "setting power mode %d\n", pwrmode);

        ks_rdreg16(ks, KS_GRR);
        pmecr = ks_rdreg16(ks, KS_PMECR);
        pmecr &= ~PMECR_PM_MASK;
        pmecr |= pwrmode;

        ks_wrreg16(ks, KS_PMECR, pmecr);
}

/**
 * ks_read_config - read the chip's bus width configuration.
 * @ks: The chip information
 *
 */
static void ks_read_config(struct ks_net *ks)
{
        u16 reg_data = 0;

        /* Regardless of bus width, an 8 bit read should always work. */
        reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
        reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;

        /* addr/data bus are multiplexed */
        ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;

        /* There is garbage data when reading from the QMU,
         * depending on the bus width.
         */
        if (reg_data & CCR_8BIT) {
                ks->bus_width = ENUM_BUS_8BIT;
                ks->extra_byte = 1;
        } else if (reg_data & CCR_16BIT) {
                ks->bus_width = ENUM_BUS_16BIT;
                ks->extra_byte = 2;
        } else {
                ks->bus_width = ENUM_BUS_32BIT;
                ks->extra_byte = 4;
        }
}

/**
 * ks_soft_reset - issue one of the soft resets to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
        /* Disable interrupt first */
        ks_wrreg16(ks, KS_IER, 0x0000);
        ks_wrreg16(ks, KS_GRR, op);
        mdelay(10); /* wait a short time to effect reset */
        ks_wrreg16(ks, KS_GRR, 0);
        mdelay(1);  /* wait for condition to clear */
}
/**
 * ks_read_qmu - read 1 pkt data from the QMU.
 * @ks: The chip information
 * @buf: buffer address to save 1 pkt
 * @len: Pkt length
 *
 * Here is the sequence to read 1 pkt:
 * 1. set pseudo-DMA mode
 * 2. read prepend data
 * 3. read pkt data
 * 4. reset pseudo-DMA mode
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
        u32 r = ks->extra_byte & 0x1;
        u32 w = ks->extra_byte - r;

        /* 1. set pseudo-DMA mode */
        ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
        ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

        /* 2. read prepend data */
        /*
         * read 4 + extra bytes and discard them:
         * extra bytes for dummy, 2 for status, 2 for len
         */

        /* use likely(r) for 8 bit access for performance */
        if (unlikely(r))
                ioread8(ks->hw_addr);
        ks_inblk(ks, buf, w + 2 + 2);

        /* 3. read pkt data */
        ks_inblk(ks, buf, ALIGN(len, 4));

        /* 4. reset pseudo-DMA mode */
        ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
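
/*
 * Sketch of the byte stream ks_read_qmu() pulls out of the data window
 * (derived from the code above, not from the datasheet): bus-width
 * dependent dummy bytes (ks->extra_byte), a 2-byte frame status word and a
 * 2-byte byte count are read into @buf and then overwritten by the packet
 * data itself, which is read padded up to a 4-byte boundary.
 */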
/**
 * ks_rcv - read multiple pkts data from the QMU.
 * @ks: The chip information
 * @netdev: The network device being opened.
 *
 * Read all of the header information before reading the pkt content.
 * Leaving only part of the pkts in the QMU after issuing the interrupt
 * ack is not allowed.
 */
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
        u32 i;
        struct type_frame_head *frame_hdr = ks->frame_head_info;
        struct sk_buff *skb;

        ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

        /* read all header information */
        for (i = 0; i < ks->frame_cnt; i++) {
                /* Checking Received packet status */
                frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
                /* Get packet len from hardware */
                frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
                frame_hdr++;
        }

        frame_hdr = ks->frame_head_info;
        while (ks->frame_cnt--) {
                skb = dev_alloc_skb(frame_hdr->len + 16);
                if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
                        (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
                        skb_reserve(skb, 2);
                        /* read data block including CRC 4 bytes */
                        ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
                        skb_put(skb, frame_hdr->len);
                        skb->dev = netdev;
                        skb->protocol = eth_type_trans(skb, netdev);
                        netif_rx(skb);
                } else {
                        printk(KERN_ERR "%s: err:skb alloc\n", __func__);
                        ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
                        if (skb)
                                dev_kfree_skb_irq(skb);
                }
                frame_hdr++;
        }
}
/**
 * ks_update_link_status - link status update.
 * @netdev: The network device being opened.
 * @ks: The chip information
 *
 */
static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
{
        /* check the status of the link */
        u32 link_up_status;

        if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
                netif_carrier_on(netdev);
                link_up_status = true;
        } else {
                netif_carrier_off(netdev);
                link_up_status = false;
        }

        if (netif_msg_link(ks))
                ks_dbg(ks, "%s: %s\n",
                        __func__, link_up_status ? "UP" : "DOWN");
}

/**
 * ks_irq - device interrupt handler
 * @irq: Interrupt number passed from the IRQ handler.
 * @pw: The private word passed to request_irq(), our struct ks_net.
 *
 * This is the handler invoked to find out what happened.
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */
static irqreturn_t ks_irq(int irq, void *pw)
{
        struct ks_net *ks = pw;
        struct net_device *netdev = ks->netdev;
        u16 status;

        /* this should be the first in the IRQ handler */
        ks_save_cmd_reg(ks);

        status = ks_rdreg16(ks, KS_ISR);
        if (unlikely(!status)) {
                ks_restore_cmd_reg(ks);
                return IRQ_NONE;
        }

        ks_wrreg16(ks, KS_ISR, status);

        if (likely(status & IRQ_RXI))
                ks_rcv(ks, netdev);

        if (unlikely(status & IRQ_LCI))
                ks_update_link_status(netdev, ks);

        if (unlikely(status & IRQ_TXI))
                netif_wake_queue(netdev);

        if (unlikely(status & IRQ_LDI)) {
                u16 pmecr = ks_rdreg16(ks, KS_PMECR);
                pmecr &= ~PMECR_WKEVT_MASK;
                ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
        }

        /* this should be the last in the IRQ handler */
        ks_restore_cmd_reg(ks);

        return IRQ_HANDLED;
}
/**
 * ks_net_open - open network device
 * @netdev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 */
static int ks_net_open(struct net_device *netdev)
{
        struct ks_net *ks = netdev_priv(netdev);
        int err;

#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
        /* lock the card, even if we may not actually do anything
         * else at the moment.
         */

        if (netif_msg_ifup(ks))
                ks_dbg(ks, "%s - entry\n", __func__);

        /* reset the HW */
        err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);

        if (err) {
                printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
                        ks->irq, err);
                return err;
        }

        if (netif_msg_ifup(ks))
                ks_dbg(ks, "network device %s up\n", netdev->name);

        return 0;
}

/**
 * ks_net_stop - close network device
 * @netdev: The device being closed.
 *
 * Called to close down a network device which has been active. Cancel any
 * work, shut down the RX and TX process and then place the chip into a low
 * power state whilst it is not being used.
 */
static int ks_net_stop(struct net_device *netdev)
{
        struct ks_net *ks = netdev_priv(netdev);

        if (netif_msg_ifdown(ks))
                ks_info(ks, "%s: shutting down\n", netdev->name);

        netif_stop_queue(netdev);

        kfree(ks->frame_head_info);

        mutex_lock(&ks->lock);

        /* turn off the IRQs and ack any outstanding */
        ks_wrreg16(ks, KS_IER, 0x0000);
        ks_wrreg16(ks, KS_ISR, 0xffff);

        /* shutdown RX process */
        ks_wrreg16(ks, KS_RXCR1, 0x0000);

        /* shutdown TX process */
        ks_wrreg16(ks, KS_TXCR, 0x0000);

        /* set powermode to soft power down to save power */
        ks_set_powermode(ks, PMECR_PM_SOFTDOWN);

        free_irq(ks->irq, ks);
        mutex_unlock(&ks->lock);
        return 0;
}
/**
 * ks_write_qmu - write 1 pkt data to the QMU.
 * @ks: The chip information
 * @pdata: buffer address of the pkt to write
 * @len: Pkt length in bytes
 *
 * Here is the sequence to write 1 pkt:
 * 1. set pseudo-DMA mode
 * 2. write status/length
 * 3. write pkt data
 * 4. reset pseudo-DMA mode
 * 5. enqueue the pkt (move it from the TX buffer into the TXQ)
 * 6. wait until the pkt is out
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
        unsigned fid = ks->fid;

        ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;

        /* reduce the tx interrupt occurrences. */
        if (!fid)
                fid |= TXFR_TXIC;       /* irq on completion */

        /* start header at txb[0] to align txw entries */
        ks->txh.txw[0] = cpu_to_le16(fid);
        ks->txh.txw[1] = cpu_to_le16(len);

        /* 1. set pseudo-DMA mode */
        ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
        /* 2. write status/length info */
        ks_outblk(ks, ks->txh.txw, 4);
        /* 3. write pkt data */
        ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
        /* 4. reset pseudo-DMA mode */
        ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
        /* 5. Enqueue Tx (move the pkt from the TX buffer into the TXQ) */
        ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
        /* 6. wait until TXQCR_METFE is auto-cleared */
        while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
                ;
}
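
/*
 * For reference (this follows from ks_write_qmu() above): the 4-byte header
 * written ahead of each packet is txw[0] = control word (frame ID in the
 * low 6 bits, TXFR_TXIC set to request an interrupt on completion) and
 * txw[1] = byte count, both little-endian; the payload that follows is
 * written padded out to a 4-byte boundary.
 */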
static void ks_disable_int(struct ks_net *ks)
{
        ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */

static void ks_enable_int(struct ks_net *ks)
{
        ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */

/**
 * ks_start_xmit - transmit packet
 * @skb : The buffer to transmit
 * @netdev : The device used to transmit the packet.
 *
 * Called by the network layer to transmit the @skb.
 * tx and rx must be mutually exclusive, so while tx is in progress,
 * prevent the IRQ from happening.
 */
static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        int retv = NETDEV_TX_OK;
        struct ks_net *ks = netdev_priv(netdev);

        disable_irq(ks->irq);
        ks_disable_int(ks);
        spin_lock(&ks->statelock);

        /* Extra space is required:
         * 4 bytes for alignment, 4 for status/length, 4 for CRC
         */
        if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
                ks_write_qmu(ks, skb->data, skb->len);
                dev_kfree_skb(skb);
        } else
                retv = NETDEV_TX_BUSY;

        spin_unlock(&ks->statelock);
        ks_enable_int(ks);
        enable_irq(ks->irq);
        return retv;
}

/**
 * ks_start_rx - ready to serve pkts
 * @ks : The chip information
 *
 */
static void ks_start_rx(struct ks_net *ks)
{
        u16 cntl;

        /* Enables QMU Receive (RXCR1). */
        cntl = ks_rdreg16(ks, KS_RXCR1);
        cntl |= RXCR1_RXE;
        ks_wrreg16(ks, KS_RXCR1, cntl);
}  /* ks_start_rx */

/**
 * ks_stop_rx - stop serving pkts
 * @ks : The chip information
 *
 */
static void ks_stop_rx(struct ks_net *ks)
{
        u16 cntl;

        /* Disables QMU Receive (RXCR1). */
        cntl = ks_rdreg16(ks, KS_RXCR1);
        cntl &= ~RXCR1_RXE;
        ks_wrreg16(ks, KS_RXCR1, cntl);
}  /* ks_stop_rx */
static unsigned long const ethernet_polynomial = 0x04c11db7U;

static unsigned long ether_gen_crc(int length, u8 *data)
{
        long crc = -1;

        while (--length >= 0) {
                u8 current_octet = *data++;
                int bit;

                for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
                        crc = (crc << 1) ^
                                ((crc < 0) ^ (current_octet & 1) ?
                                ethernet_polynomial : 0);
                }
        }
        return (unsigned long)crc;
}  /* ether_gen_crc */

/**
 * ks_set_grpaddr - set multicast information
 * @ks : The chip information
 */
static void ks_set_grpaddr(struct ks_net *ks)
{
        u8 i;
        u32 index, position, value;

        memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

        for (i = 0; i < ks->mcast_lst_size; i++) {
                position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
                index = position >> 3;
                value = 1 << (position & 7);
                ks->mcast_bits[index] |= (u8)value;
        }

        for (i = 0; i < HW_MCAST_SIZE; i++) {
                if (i & 1) {
                        ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
                                (ks->mcast_bits[i] << 8) |
                                ks->mcast_bits[i - 1]);
                }
        }
}  /* ks_set_grpaddr */
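
/*
 * Note on the hash above (a description of the code, not of the datasheet):
 * the top six bits of the CRC of each multicast address pick one of the 64
 * bits in mcast_bits[], and consecutive byte pairs are then written into
 * the four 16-bit hash table registers KS_MAHTR0..KS_MAHTR3.
 */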
/*
 * ks_clear_mcast - clear multicast information
 *
 * @ks : The chip information
 * This routine removes all mcast addresses set in the hardware.
 */
static void ks_clear_mcast(struct ks_net *ks)
{
        u16 i, mcast_size;

        for (i = 0; i < HW_MCAST_SIZE; i++)
                ks->mcast_bits[i] = 0;

        mcast_size = HW_MCAST_SIZE >> 2;
        for (i = 0; i < mcast_size; i++)
                ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
}

static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
        u16 cntl;

        ks->promiscuous = promiscuous_mode;
        ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
        cntl = ks_rdreg16(ks, KS_RXCR1);

        cntl &= ~RXCR1_FILTER_MASK;
        if (promiscuous_mode)
                /* Enable Promiscuous mode */
                cntl |= RXCR1_RXAE | RXCR1_RXINVF;
        else
                /* Disable Promiscuous mode (default normal mode) */
                cntl |= RXCR1_RXPAFMA;

        ks_wrreg16(ks, KS_RXCR1, cntl);

        if (ks->enabled)
                ks_start_rx(ks);
}  /* ks_set_promis */

static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
        u16 cntl;

        ks->all_mcast = mcast;
        ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
        cntl = ks_rdreg16(ks, KS_RXCR1);
        cntl &= ~RXCR1_FILTER_MASK;
        if (mcast)
                /* Enable "Perfect with Multicast address passed mode" */
                cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
        else
                /*
                 * Disable "Perfect with Multicast address passed
                 * mode" (normal mode).
                 */
                cntl |= RXCR1_RXPAFMA;

        ks_wrreg16(ks, KS_RXCR1, cntl);

        if (ks->enabled)
                ks_start_rx(ks);
}  /* ks_set_mcast */

static void ks_set_rx_mode(struct net_device *netdev)
{
        struct ks_net *ks = netdev_priv(netdev);
        struct dev_mc_list *ptr;

        /* Turn on/off promiscuous mode. */
        if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
                ks_set_promis(ks,
                        (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
        /* Turn on/off all mcast mode. */
        else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
                ks_set_mcast(ks,
                        (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
        else
                ks_set_promis(ks, false);

        if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
                if (netdev->mc_count <= MAX_MCAST_LST) {
                        int i = 0;

                        for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
                                if (!(*ptr->dmi_addr & 1))
                                        continue;
                                if (i >= MAX_MCAST_LST)
                                        break;
                                memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
                                        MAC_ADDR_LEN);
                        }
                        ks->mcast_lst_size = (u8)i;
                        ks_set_grpaddr(ks);
                } else {
                        /*
                         * List too big to support so
                         * turn on all mcast mode.
                         */
                        ks->mcast_lst_size = MAX_MCAST_LST;
                        ks_set_mcast(ks, true);
                }
        } else {
                ks->mcast_lst_size = 0;
                ks_clear_mcast(ks);
        }
}  /* ks_set_rx_mode */
static void ks_set_mac(struct ks_net *ks, u8 *data)
{
        u16 *pw = (u16 *)data;
        u16 w, u;

        ks_stop_rx(ks);  /* Stop receiving for reconfiguration */

        u = *pw++;
        w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
        ks_wrreg16(ks, KS_MARH, w);

        u = *pw++;
        w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
        ks_wrreg16(ks, KS_MARM, w);

        u = *pw;
        w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
        ks_wrreg16(ks, KS_MARL, w);

        memcpy(ks->mac_addr, data, 6);

        if (ks->enabled)
                ks_start_rx(ks);
}

static int ks_set_mac_address(struct net_device *netdev, void *paddr)
{
        struct ks_net *ks = netdev_priv(netdev);
        struct sockaddr *addr = paddr;
        u8 *da;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        da = (u8 *)netdev->dev_addr;

        ks_set_mac(ks, da);
        return 0;
}

static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
        struct ks_net *ks = netdev_priv(netdev);

        if (!netif_running(netdev))
                return -EINVAL;

        return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}

static const struct net_device_ops ks_netdev_ops = {
        .ndo_open = ks_net_open,
        .ndo_stop = ks_net_stop,
        .ndo_do_ioctl = ks_net_ioctl,
        .ndo_start_xmit = ks_start_xmit,
        .ndo_set_mac_address = ks_set_mac_address,
        .ndo_set_rx_mode = ks_set_rx_mode,
        .ndo_change_mtu = eth_change_mtu,
        .ndo_validate_addr = eth_validate_addr,
};
/* ethtool support */

static void ks_get_drvinfo(struct net_device *netdev,
                           struct ethtool_drvinfo *di)
{
        strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
        strlcpy(di->version, "1.00", sizeof(di->version));
        strlcpy(di->bus_info, dev_name(netdev->dev.parent),
                sizeof(di->bus_info));
}

static u32 ks_get_msglevel(struct net_device *netdev)
{
        struct ks_net *ks = netdev_priv(netdev);

        return ks->msg_enable;
}

static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
        struct ks_net *ks = netdev_priv(netdev);

        ks->msg_enable = to;
}

static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
        struct ks_net *ks = netdev_priv(netdev);

        return mii_ethtool_gset(&ks->mii, cmd);
}

static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
        struct ks_net *ks = netdev_priv(netdev);

        return mii_ethtool_sset(&ks->mii, cmd);
}

static u32 ks_get_link(struct net_device *netdev)
{
        struct ks_net *ks = netdev_priv(netdev);

        return mii_link_ok(&ks->mii);
}

static int ks_nway_reset(struct net_device *netdev)
{
        struct ks_net *ks = netdev_priv(netdev);

        return mii_nway_restart(&ks->mii);
}

static const struct ethtool_ops ks_ethtool_ops = {
        .get_drvinfo = ks_get_drvinfo,
        .get_msglevel = ks_get_msglevel,
        .set_msglevel = ks_set_msglevel,
        .get_settings = ks_get_settings,
        .set_settings = ks_set_settings,
        .get_link = ks_get_link,
        .nway_reset = ks_nway_reset,
};
/* MII interface controls */

/**
 * ks_phy_reg - convert MII register into a KS8851 register
 * @reg: MII register number.
 *
 * Return the KS8851 register number for the corresponding MII PHY register
 * if possible. Return zero if the MII register has no direct mapping to the
 * KS8851 register set.
 */
static int ks_phy_reg(int reg)
{
        switch (reg) {
        case MII_BMCR:
                return KS_P1MBCR;
        case MII_BMSR:
                return KS_P1MBSR;
        case MII_PHYSID1:
                return KS_PHY1ILR;
        case MII_PHYSID2:
                return KS_PHY1IHR;
        case MII_ADVERTISE:
                return KS_P1ANAR;
        case MII_LPA:
                return KS_P1ANLPR;
        }

        return 0x0;
}

/**
 * ks_phy_read - MII interface PHY register read.
 * @netdev: The network device the PHY is on.
 * @phy_addr: Address of PHY (ignored as we only have one)
 * @reg: The register to read.
 *
 * This call reads data from the PHY register specified in @reg. Since the
 * device does not support all the MII registers, the non-existent values
 * are always returned as zero.
 *
 * We return zero for unsupported registers as the MII code does not check
 * the value returned for any error status, and simply returns it to the
 * caller. The mii-tool that the driver was tested with takes any -ve error
 * as real PHY capabilities, thus displaying incorrect data to the user.
 */
static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
{
        struct ks_net *ks = netdev_priv(netdev);
        int ksreg;
        int result;

        ksreg = ks_phy_reg(reg);
        if (!ksreg)
                return 0x0;     /* no error return allowed, so use zero */

        mutex_lock(&ks->lock);
        result = ks_rdreg16(ks, ksreg);
        mutex_unlock(&ks->lock);

        return result;
}

static void ks_phy_write(struct net_device *netdev,
                         int phy, int reg, int value)
{
        struct ks_net *ks = netdev_priv(netdev);
        int ksreg;

        ksreg = ks_phy_reg(reg);
        if (ksreg) {
                mutex_lock(&ks->lock);
                ks_wrreg16(ks, ksreg, value);
                mutex_unlock(&ks->lock);
        }
}
/**
 * ks_read_selftest - read the selftest memory info.
 * @ks: The device state
 *
 * Read and check the TX/RX memory selftest information.
 */
static int ks_read_selftest(struct ks_net *ks)
{
        unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
        int ret = 0;
        unsigned rd;

        rd = ks_rdreg16(ks, KS_MBIR);

        if ((rd & both_done) != both_done) {
                ks_warn(ks, "Memory selftest not finished\n");
                return 0;
        }

        if (rd & MBIR_TXMBFA) {
                ks_err(ks, "TX memory selftest fails\n");
                ret |= 1;
        }

        if (rd & MBIR_RXMBFA) {
                ks_err(ks, "RX memory selftest fails\n");
                ret |= 2;
        }

        if (!ret)
                ks_info(ks, "the selftest passes\n");
        return ret;
}

static void ks_disable(struct ks_net *ks)
{
        u16 w;

        w = ks_rdreg16(ks, KS_TXCR);

        /* Disables QMU Transmit (TXCR). */
        w &= ~TXCR_TXE;
        ks_wrreg16(ks, KS_TXCR, w);

        /* Disables QMU Receive (RXCR1). */
        w = ks_rdreg16(ks, KS_RXCR1);
        w &= ~RXCR1_RXE;
        ks_wrreg16(ks, KS_RXCR1, w);

        ks->enabled = false;
}  /* ks_disable */
static void ks_setup(struct ks_net *ks)
{
        u16 w;

        /*
         * Configure QMU Transmit
         */

        /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
        ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

        /* Setup Receive Frame Data Pointer Auto-Increment */
        ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

        /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
        ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

        /* Setup RxQ Command Control (RXQCR) */
        ks->rc_rxqcr = RXQCR_CMD_CNTL;
        ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

        /*
         * Set the force mode to half duplex (the default is full duplex),
         * because if auto-negotiation fails, most switches fall back to
         * half duplex.
         */
        w = ks_rdreg16(ks, KS_P1MBCR);
        w &= ~P1MBCR_FORCE_FDX;
        ks_wrreg16(ks, KS_P1MBCR, w);

        w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
        ks_wrreg16(ks, KS_TXCR, w);

        w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;

        if (ks->promiscuous)    /* bPromiscuous */
                w |= (RXCR1_RXAE | RXCR1_RXINVF);
        else if (ks->all_mcast) /* Multicast address passed mode */
                w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
        else                    /* Normal mode */
                w |= RXCR1_RXPAFMA;

        ks_wrreg16(ks, KS_RXCR1, w);
}  /* ks_setup */
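
/*
 * Summary of the RXCR1 filter modes used in this driver (as programmed in
 * ks_setup(), ks_set_promis() and ks_set_mcast()): promiscuous mode sets
 * RXAE | RXINVF, all-multicast mode sets RXAE | RXMAFMA | RXPAFMA, and
 * normal mode keeps only RXPAFMA (perfect address match).
 */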
static void ks_setup_int(struct ks_net *ks)
{
        ks->rc_ier = 0x00;
        /* Clear the interrupt status of the hardware. */
        ks_wrreg16(ks, KS_ISR, 0xffff);

        /* Enables the interrupts of the hardware. */
        ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
}  /* ks_setup_int */

static void ks_enable(struct ks_net *ks)
{
        u16 w;

        w = ks_rdreg16(ks, KS_TXCR);
        /* Enables QMU Transmit (TXCR). */
        ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

        /*
         * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
         * Enable
         */
        w = ks_rdreg16(ks, KS_RXQCR);
        ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

        /* Enables QMU Receive (RXCR1). */
        w = ks_rdreg16(ks, KS_RXCR1);
        ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
        ks->enabled = true;
}  /* ks_enable */

static int ks_hw_init(struct ks_net *ks)
{
#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
        ks->promiscuous = 0;
        ks->all_mcast = 0;
        ks->mcast_lst_size = 0;

        ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
        if (!ks->frame_head_info) {
                printk(KERN_ERR "Error: Fail to allocate frame memory\n");
                return false;
        }

        ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
        return true;
}
static int __devinit ks8851_probe(struct platform_device *pdev)
{
        int err = -ENOMEM;
        struct resource *io_d, *io_c;
        struct net_device *netdev;
        struct ks_net *ks;
        u16 id, data;

        io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);

        if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
                goto err_mem_region;

        if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
                goto err_mem_region1;

        netdev = alloc_etherdev(sizeof(struct ks_net));
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        ks = netdev_priv(netdev);
        ks->netdev = netdev;
        ks->hw_addr = ioremap(io_d->start, resource_size(io_d));

        if (!ks->hw_addr)
                goto err_ioremap;

        ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
        if (!ks->hw_addr_cmd)
                goto err_ioremap1;

        ks->irq = platform_get_irq(pdev, 0);

        if (ks->irq < 0) {
                err = ks->irq;
                goto err_get_irq;
        }

        ks->pdev = pdev;

        mutex_init(&ks->lock);
        spin_lock_init(&ks->statelock);

        netdev->netdev_ops = &ks_netdev_ops;
        netdev->ethtool_ops = &ks_ethtool_ops;

        /* setup mii state */
        ks->mii.dev = netdev;
        ks->mii.phy_id = 1;
        ks->mii.phy_id_mask = 1;
        ks->mii.reg_num_mask = 0xf;
        ks->mii.mdio_read = ks_phy_read;
        ks->mii.mdio_write = ks_phy_write;

        ks_info(ks, "message enable is %d\n", msg_enable);
        /* set the default message enable */
        ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
                                                     NETIF_MSG_PROBE |
                                                     NETIF_MSG_LINK));
        ks_read_config(ks);

        /* simple check for a valid chip being connected to the bus */
        if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
                ks_err(ks, "failed to read device ID\n");
                err = -ENODEV;
                goto err_register;
        }

        if (ks_read_selftest(ks)) {
                ks_err(ks, "failed memory selftest\n");
                err = -ENODEV;
                goto err_register;
        }

        err = register_netdev(netdev);
        if (err)
                goto err_register;

        platform_set_drvdata(pdev, netdev);

        ks_soft_reset(ks, GRR_GSR);
        ks_hw_init(ks);
        ks_disable(ks);
        ks_setup(ks);
        ks_setup_int(ks);
        ks_enable_int(ks);
        ks_enable(ks);
        memcpy(netdev->dev_addr, ks->mac_addr, 6);

        data = ks_rdreg16(ks, KS_OBCR);
        ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);

        /*
         * If you want to use the default MAC addr,
         * comment out the 2 functions below.
         */
        random_ether_addr(netdev->dev_addr);
        ks_set_mac(ks, netdev->dev_addr);

        id = ks_rdreg16(ks, KS_CIDER);

        printk(KERN_INFO DRV_NAME
                " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
                (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
        return 0;

err_register:
err_get_irq:
        iounmap(ks->hw_addr_cmd);
err_ioremap1:
        iounmap(ks->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        release_mem_region(io_c->start, resource_size(io_c));
err_mem_region1:
        release_mem_region(io_d->start, resource_size(io_d));
err_mem_region:
        return err;
}
static int __devexit ks8851_remove(struct platform_device *pdev)
{
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct ks_net *ks = netdev_priv(netdev);
        struct resource *io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct resource *io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);

        unregister_netdev(netdev);
        iounmap(ks->hw_addr);
        iounmap(ks->hw_addr_cmd);
        free_netdev(netdev);
        release_mem_region(io_d->start, resource_size(io_d));
        release_mem_region(io_c->start, resource_size(io_c));
        platform_set_drvdata(pdev, NULL);
        return 0;
}
static struct platform_driver ks8851_platform_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
        },
        .probe = ks8851_probe,
        .remove = __devexit_p(ks8851_remove),
};

static int __init ks8851_init(void)
{
        return platform_driver_register(&ks8851_platform_driver);
}

static void __exit ks8851_exit(void)
{
        platform_driver_unregister(&ks8851_platform_driver);
}

module_init(ks8851_init);
module_exit(ks8851_exit);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");