mvneta.c

  1. /*
  2. * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
  3. *
  4. * Copyright (C) 2012 Marvell
  5. *
  6. * Rami Rosen <rosenr@marvell.com>
  7. * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  8. *
  9. * This file is licensed under the terms of the GNU General Public
  10. * License version 2. This program is licensed "as is" without any
  11. * warranty of any kind, whether express or implied.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/skbuff.h>
  18. #include <linux/inetdevice.h>
  19. #include <linux/mbus.h>
  20. #include <linux/module.h>
  21. #include <linux/interrupt.h>
  22. #include <net/ip.h>
  23. #include <net/ipv6.h>
  24. #include <linux/of.h>
  25. #include <linux/of_irq.h>
  26. #include <linux/of_mdio.h>
  27. #include <linux/of_net.h>
  28. #include <linux/of_address.h>
  29. #include <linux/phy.h>
  30. #include <linux/clk.h>
  31. /* Registers */
  32. #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
  33. #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
  34. #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
  35. #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
  36. #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
  37. #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
  38. #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
  39. #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
  40. #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
  41. #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
  42. #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
  43. #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
  44. #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
  45. #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
  46. #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
  47. #define MVNETA_PORT_RX_RESET 0x1cc0
  48. #define MVNETA_PORT_RX_DMA_RESET BIT(0)
  49. #define MVNETA_PHY_ADDR 0x2000
  50. #define MVNETA_PHY_ADDR_MASK 0x1f
  51. #define MVNETA_MBUS_RETRY 0x2010
  52. #define MVNETA_UNIT_INTR_CAUSE 0x2080
  53. #define MVNETA_UNIT_CONTROL 0x20B0
  54. #define MVNETA_PHY_POLLING_ENABLE BIT(1)
  55. #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
  56. #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
  57. #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
  58. #define MVNETA_BASE_ADDR_ENABLE 0x2290
  59. #define MVNETA_PORT_CONFIG 0x2400
  60. #define MVNETA_UNI_PROMISC_MODE BIT(0)
  61. #define MVNETA_DEF_RXQ(q) ((q) << 1)
  62. #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
  63. #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
  64. #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
  65. #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
  66. #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
  67. #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
  68. #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
  69. MVNETA_DEF_RXQ_ARP(q) | \
  70. MVNETA_DEF_RXQ_TCP(q) | \
  71. MVNETA_DEF_RXQ_UDP(q) | \
  72. MVNETA_DEF_RXQ_BPDU(q) | \
  73. MVNETA_TX_UNSET_ERR_SUM | \
  74. MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
  75. #define MVNETA_PORT_CONFIG_EXTEND 0x2404
  76. #define MVNETA_MAC_ADDR_LOW 0x2414
  77. #define MVNETA_MAC_ADDR_HIGH 0x2418
  78. #define MVNETA_SDMA_CONFIG 0x241c
  79. #define MVNETA_SDMA_BRST_SIZE_16 4
  80. #define MVNETA_NO_DESC_SWAP 0x0
  81. #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
  82. #define MVNETA_RX_NO_DATA_SWAP BIT(4)
  83. #define MVNETA_TX_NO_DATA_SWAP BIT(5)
  84. #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
  85. #define MVNETA_PORT_STATUS 0x2444
  86. #define MVNETA_TX_IN_PRGRS BIT(1)
  87. #define MVNETA_TX_FIFO_EMPTY BIT(8)
  88. #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
  89. #define MVNETA_TYPE_PRIO 0x24bc
  90. #define MVNETA_FORCE_UNI BIT(21)
  91. #define MVNETA_TXQ_CMD_1 0x24e4
  92. #define MVNETA_TXQ_CMD 0x2448
  93. #define MVNETA_TXQ_DISABLE_SHIFT 8
  94. #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
  95. #define MVNETA_ACC_MODE 0x2500
  96. #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
  97. #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
  98. #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
  99. #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
  100. #define MVNETA_INTR_NEW_CAUSE 0x25a0
  101. #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
  102. #define MVNETA_INTR_NEW_MASK 0x25a4
  103. #define MVNETA_INTR_OLD_CAUSE 0x25a8
  104. #define MVNETA_INTR_OLD_MASK 0x25ac
  105. #define MVNETA_INTR_MISC_CAUSE 0x25b0
  106. #define MVNETA_INTR_MISC_MASK 0x25b4
  107. #define MVNETA_INTR_ENABLE 0x25b8
  108. #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
  109. #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
  110. #define MVNETA_RXQ_CMD 0x2680
  111. #define MVNETA_RXQ_DISABLE_SHIFT 8
  112. #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
  113. #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
  114. #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
  115. #define MVNETA_GMAC_CTRL_0 0x2c00
  116. #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
  117. #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
  118. #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
  119. #define MVNETA_GMAC_CTRL_2 0x2c08
  120. #define MVNETA_GMAC2_PSC_ENABLE BIT(3)
  121. #define MVNETA_GMAC2_PORT_RGMII BIT(4)
  122. #define MVNETA_GMAC2_PORT_RESET BIT(6)
  123. #define MVNETA_GMAC_STATUS 0x2c10
  124. #define MVNETA_GMAC_LINK_UP BIT(0)
  125. #define MVNETA_GMAC_SPEED_1000 BIT(1)
  126. #define MVNETA_GMAC_SPEED_100 BIT(2)
  127. #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
  128. #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
  129. #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
  130. #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
  131. #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
  132. #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
  133. #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
  134. #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
  135. #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
  136. #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
  137. #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
  138. #define MVNETA_MIB_COUNTERS_BASE 0x3080
  139. #define MVNETA_MIB_LATE_COLLISION 0x7c
  140. #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
  141. #define MVNETA_DA_FILT_OTH_MCAST 0x3500
  142. #define MVNETA_DA_FILT_UCAST_BASE 0x3600
  143. #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
  144. #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
  145. #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
  146. #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
  147. #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
  148. #define MVNETA_TXQ_DEC_SENT_SHIFT 16
  149. #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
  150. #define MVNETA_TXQ_SENT_DESC_SHIFT 16
  151. #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
  152. #define MVNETA_PORT_TX_RESET 0x3cf0
  153. #define MVNETA_PORT_TX_DMA_RESET BIT(0)
  154. #define MVNETA_TX_MTU 0x3e0c
  155. #define MVNETA_TX_TOKEN_SIZE 0x3e14
  156. #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
  157. #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
  158. #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
  159. #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
  160. /* Descriptor ring Macros */
  161. #define MVNETA_QUEUE_NEXT_DESC(q, index) \
  162. (((index) < (q)->last_desc) ? ((index) + 1) : 0)
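/* Illustrative note: this macro implements the ring wrap-around. With
 * last_desc = 127, index 126 advances to 127 and index 127 wraps back
 * to 0.
 */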
  163. /* Various constants */
  164. /* Coalescing */
  165. #define MVNETA_TXDONE_COAL_PKTS 16
  166. #define MVNETA_RX_COAL_PKTS 32
  167. #define MVNETA_RX_COAL_USEC 100
  168. /* Timer */
  169. #define MVNETA_TX_DONE_TIMER_PERIOD 10
  170. /* Napi polling weight */
  171. #define MVNETA_RX_POLL_WEIGHT 64
  172. /* The two-byte Marvell header. It either contains a special value
  173. * used by Marvell switches when a specific hardware mode is enabled
  174. * (not supported by this driver), or is automatically filled with
  175. * zeroes on the RX side. Because those two bytes sit in front of the
  176. * Ethernet header, they automatically align the IP header on a
  177. * 4-byte boundary: the hardware skips those two bytes on its
  178. * own.
  179. */
  180. #define MVNETA_MH_SIZE 2
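/* Worked example (illustrative): with the 2-byte Marvell header in
 * front of the 14-byte Ethernet header, the IP header starts at
 * offset 2 + 14 = 16 from the beginning of the buffer, i.e. on a
 * 4-byte boundary, as described above.
 */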
  181. #define MVNETA_VLAN_TAG_LEN 4
  182. #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
  183. #define MVNETA_TX_CSUM_MAX_SIZE 9800
  184. #define MVNETA_ACC_MODE_EXT 1
  185. /* Timeout constants */
  186. #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
  187. #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
  188. #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
  189. #define MVNETA_TX_MTU_MAX 0x3ffff
  190. /* Max number of Rx descriptors */
  191. #define MVNETA_MAX_RXD 128
  192. /* Max number of Tx descriptors */
  193. #define MVNETA_MAX_TXD 532
  194. /* descriptor aligned size */
  195. #define MVNETA_DESC_ALIGNED_SIZE 32
  196. #define MVNETA_RX_PKT_SIZE(mtu) \
  197. ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
  198. ETH_HLEN + ETH_FCS_LEN, \
  199. MVNETA_CPU_D_CACHE_LINE_SIZE)
  200. #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
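/* Worked example (illustrative): for a 1500-byte MTU,
 * MVNETA_RX_PKT_SIZE = ALIGN(1500 + 2 + 4 + 14 + 4, 32) =
 * ALIGN(1524, 32) = 1536 bytes; MVNETA_RX_BUF_SIZE then adds
 * NET_SKB_PAD on top of that.
 */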
  201. struct mvneta_stats {
  202. struct u64_stats_sync syncp;
  203. u64 packets;
  204. u64 bytes;
  205. };
  206. struct mvneta_port {
  207. int pkt_size;
  208. void __iomem *base;
  209. struct mvneta_rx_queue *rxqs;
  210. struct mvneta_tx_queue *txqs;
  211. struct timer_list tx_done_timer;
  212. struct net_device *dev;
  213. u32 cause_rx_tx;
  214. struct napi_struct napi;
  215. /* Flags */
  216. unsigned long flags;
  217. #define MVNETA_F_TX_DONE_TIMER_BIT 0
  218. /* Napi weight */
  219. int weight;
  220. /* Core clock */
  221. struct clk *clk;
  222. u8 mcast_count[256];
  223. u16 tx_ring_size;
  224. u16 rx_ring_size;
  225. struct mvneta_stats tx_stats;
  226. struct mvneta_stats rx_stats;
  227. struct mii_bus *mii_bus;
  228. struct phy_device *phy_dev;
  229. phy_interface_t phy_interface;
  230. struct device_node *phy_node;
  231. unsigned int link;
  232. unsigned int duplex;
  233. unsigned int speed;
  234. };
  235. /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
  236. * layout of the transmit and receive DMA descriptors; that layout
  237. * is therefore dictated by the hardware design.
  238. */
  239. struct mvneta_tx_desc {
  240. u32 command; /* Options used by HW for packet transmitting.*/
  241. #define MVNETA_TX_L3_OFF_SHIFT 0
  242. #define MVNETA_TX_IP_HLEN_SHIFT 8
  243. #define MVNETA_TX_L4_UDP BIT(16)
  244. #define MVNETA_TX_L3_IP6 BIT(17)
  245. #define MVNETA_TXD_IP_CSUM BIT(18)
  246. #define MVNETA_TXD_Z_PAD BIT(19)
  247. #define MVNETA_TXD_L_DESC BIT(20)
  248. #define MVNETA_TXD_F_DESC BIT(21)
  249. #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
  250. MVNETA_TXD_L_DESC | \
  251. MVNETA_TXD_F_DESC)
  252. #define MVNETA_TX_L4_CSUM_FULL BIT(30)
  253. #define MVNETA_TX_L4_CSUM_NOT BIT(31)
  254. u16 reserved1; /* csum_l4 (for future use) */
  255. u16 data_size; /* Data size of transmitted packet in bytes */
  256. u32 buf_phys_addr; /* Physical addr of transmitted buffer */
  257. u32 reserved2; /* hw_cmd - (for future use, PMT) */
  258. u32 reserved3[4]; /* Reserved - (for future use) */
  259. };
  260. struct mvneta_rx_desc {
  261. u32 status; /* Info about received packet */
  262. #define MVNETA_RXD_ERR_CRC 0x0
  263. #define MVNETA_RXD_ERR_SUMMARY BIT(16)
  264. #define MVNETA_RXD_ERR_OVERRUN BIT(17)
  265. #define MVNETA_RXD_ERR_LEN BIT(18)
  266. #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
  267. #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
  268. #define MVNETA_RXD_L3_IP4 BIT(25)
  269. #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
  270. #define MVNETA_RXD_L4_CSUM_OK BIT(30)
  271. u16 reserved1; /* pnc_info - (for future use, PnC) */
  272. u16 data_size; /* Size of received packet in bytes */
  273. u32 buf_phys_addr; /* Physical address of the buffer */
  274. u32 reserved2; /* pnc_flow_id (for future use, PnC) */
  275. u32 buf_cookie; /* cookie for access to RX buffer in rx path */
  276. u16 reserved3; /* prefetch_cmd, for future use */
  277. u16 reserved4; /* csum_l4 - (for future use, PnC) */
  278. u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
  279. u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
  280. };
  281. struct mvneta_tx_queue {
  282. /* Number of this TX queue, in the range 0-7 */
  283. u8 id;
  284. /* Number of TX DMA descriptors in the descriptor ring */
  285. int size;
  286. /* Number of currently used TX DMA descriptor in the
  287. * descriptor ring
  288. */
  289. int count;
  290. /* Array of transmitted skb */
  291. struct sk_buff **tx_skb;
  292. /* Index of last TX DMA descriptor that was inserted */
  293. int txq_put_index;
  294. /* Index of the TX DMA descriptor to be cleaned up */
  295. int txq_get_index;
  296. u32 done_pkts_coal;
  297. /* Virtual address of the TX DMA descriptors array */
  298. struct mvneta_tx_desc *descs;
  299. /* DMA address of the TX DMA descriptors array */
  300. dma_addr_t descs_phys;
  301. /* Index of the last TX DMA descriptor */
  302. int last_desc;
  303. /* Index of the next TX DMA descriptor to process */
  304. int next_desc_to_proc;
  305. };
  306. struct mvneta_rx_queue {
  307. /* rx queue number, in the range 0-7 */
  308. u8 id;
  309. /* num of rx descriptors in the rx descriptor ring */
  310. int size;
  311. /* counter of times when mvneta_refill() failed */
  312. int missed;
  313. u32 pkts_coal;
  314. u32 time_coal;
  315. /* Virtual address of the RX DMA descriptors array */
  316. struct mvneta_rx_desc *descs;
  317. /* DMA address of the RX DMA descriptors array */
  318. dma_addr_t descs_phys;
  319. /* Index of the last RX DMA descriptor */
  320. int last_desc;
  321. /* Index of the next RX DMA descriptor to process */
  322. int next_desc_to_proc;
  323. };
  324. static int rxq_number = 8;
  325. static int txq_number = 8;
  326. static int rxq_def;
  327. static int txq_def;
  328. #define MVNETA_DRIVER_NAME "mvneta"
  329. #define MVNETA_DRIVER_VERSION "1.0"
  330. /* Utility/helper methods */
  331. /* Write helper method */
  332. static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
  333. {
  334. writel(data, pp->base + offset);
  335. }
  336. /* Read helper method */
  337. static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
  338. {
  339. return readl(pp->base + offset);
  340. }
  341. /* Increment txq get counter */
  342. static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
  343. {
  344. txq->txq_get_index++;
  345. if (txq->txq_get_index == txq->size)
  346. txq->txq_get_index = 0;
  347. }
  348. /* Increment txq put counter */
  349. static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
  350. {
  351. txq->txq_put_index++;
  352. if (txq->txq_put_index == txq->size)
  353. txq->txq_put_index = 0;
  354. }
  355. /* Clear all MIB counters */
  356. static void mvneta_mib_counters_clear(struct mvneta_port *pp)
  357. {
  358. int i;
  359. u32 dummy;
  360. /* Perform dummy reads from MIB counters */
  361. for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
  362. dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
  363. }
  364. /* Get System Network Statistics */
  365. struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
  366. struct rtnl_link_stats64 *stats)
  367. {
  368. struct mvneta_port *pp = netdev_priv(dev);
  369. unsigned int start;
  370. memset(stats, 0, sizeof(struct rtnl_link_stats64));
  371. do {
  372. start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
  373. stats->rx_packets = pp->rx_stats.packets;
  374. stats->rx_bytes = pp->rx_stats.bytes;
  375. } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
  376. do {
  377. start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
  378. stats->tx_packets = pp->tx_stats.packets;
  379. stats->tx_bytes = pp->tx_stats.bytes;
  380. } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
  381. stats->rx_errors = dev->stats.rx_errors;
  382. stats->rx_dropped = dev->stats.rx_dropped;
  383. stats->tx_dropped = dev->stats.tx_dropped;
  384. return stats;
  385. }
  386. /* Rx descriptors helper methods */
  387. /* Checks whether the given RX descriptor is both the first and the
  388. * last descriptor for the RX packet. Each RX packet is currently
  389. * received through a single RX descriptor, so a descriptor that
  390. * does not have both its first and last bits set indicates an error.
  391. */
  392. static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
  393. {
  394. return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
  395. MVNETA_RXD_FIRST_LAST_DESC;
  396. }
  397. /* Add number of descriptors ready to receive new packets */
  398. static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
  399. struct mvneta_rx_queue *rxq,
  400. int ndescs)
  401. {
  402. /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
  403. * be added at once
  404. */
  405. while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
  406. mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
  407. (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
  408. MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
  409. ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
  410. }
  411. mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
  412. (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
  413. }
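/* Illustrative note: since at most 255 non-occupied descriptors can be
 * added per write, a request for e.g. 300 descriptors results in two
 * register writes above: one of 255 and one of 45.
 */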
  414. /* Get number of RX descriptors occupied by received packets */
  415. static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
  416. struct mvneta_rx_queue *rxq)
  417. {
  418. u32 val;
  419. val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
  420. return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
  421. }
  422. /* Update the number of RX descriptors; called upon return from the
  423. * rx path or from mvneta_rxq_drop_pkts().
  424. */
  425. static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
  426. struct mvneta_rx_queue *rxq,
  427. int rx_done, int rx_filled)
  428. {
  429. u32 val;
  430. if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
  431. val = rx_done |
  432. (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
  433. mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
  434. return;
  435. }
  436. /* Only 255 descriptors can be added at once */
  437. while ((rx_done > 0) || (rx_filled > 0)) {
  438. if (rx_done <= 0xff) {
  439. val = rx_done;
  440. rx_done = 0;
  441. } else {
  442. val = 0xff;
  443. rx_done -= 0xff;
  444. }
  445. if (rx_filled <= 0xff) {
  446. val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
  447. rx_filled = 0;
  448. } else {
  449. val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
  450. rx_filled -= 0xff;
  451. }
  452. mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
  453. }
  454. }
  455. /* Get pointer to next RX descriptor to be processed by SW */
  456. static struct mvneta_rx_desc *
  457. mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
  458. {
  459. int rx_desc = rxq->next_desc_to_proc;
  460. rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
  461. return rxq->descs + rx_desc;
  462. }
  463. /* Change maximum receive size of the port. */
  464. static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
  465. {
  466. u32 val;
  467. val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
  468. val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
  469. val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
  470. MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
  471. mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
  472. }
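/* Illustrative note: the max-RX-size field written above is programmed
 * in units of 2 bytes, hence the division by 2 after subtracting the
 * Marvell header size.
 */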
  473. /* Set rx queue offset */
  474. static void mvneta_rxq_offset_set(struct mvneta_port *pp,
  475. struct mvneta_rx_queue *rxq,
  476. int offset)
  477. {
  478. u32 val;
  479. val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
  480. val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
  481. /* The offset is programmed in units of 8 bytes */
  482. val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
  483. mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
  484. }
  485. /* Tx descriptors helper methods */
  486. /* Update HW with number of TX descriptors to be sent */
  487. static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
  488. struct mvneta_tx_queue *txq,
  489. int pend_desc)
  490. {
  491. u32 val;
  492. /* Only 255 descriptors can be added at once; assume the caller
  493. * processes TX descriptors in quanta of less than 256
  494. */
  495. val = pend_desc;
  496. mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
  497. }
  498. /* Get pointer to next TX descriptor to be processed (send) by HW */
  499. static struct mvneta_tx_desc *
  500. mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
  501. {
  502. int tx_desc = txq->next_desc_to_proc;
  503. txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
  504. return txq->descs + tx_desc;
  505. }
  506. /* Release the last allocated TX descriptor. Useful to handle DMA
  507. * mapping failures in the TX path.
  508. */
  509. static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
  510. {
  511. if (txq->next_desc_to_proc == 0)
  512. txq->next_desc_to_proc = txq->last_desc - 1;
  513. else
  514. txq->next_desc_to_proc--;
  515. }
  516. /* Set rxq buf size */
  517. static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
  518. struct mvneta_rx_queue *rxq,
  519. int buf_size)
  520. {
  521. u32 val;
  522. val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
  523. val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
  524. val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
  525. mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
  526. }
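/* Illustrative note: the buffer size field is programmed in units of
 * 8 bytes, hence the buf_size >> 3 above.
 */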
  527. /* Disable buffer management (BM) */
  528. static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
  529. struct mvneta_rx_queue *rxq)
  530. {
  531. u32 val;
  532. val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
  533. val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
  534. mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
  535. }
  536. /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
  537. static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
  538. {
  539. u32 val;
  540. val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
  541. if (enable)
  542. val |= MVNETA_GMAC2_PORT_RGMII;
  543. else
  544. val &= ~MVNETA_GMAC2_PORT_RGMII;
  545. mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
  546. }
  547. /* Config SGMII port */
  548. static void mvneta_port_sgmii_config(struct mvneta_port *pp)
  549. {
  550. u32 val;
  551. val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
  552. val |= MVNETA_GMAC2_PSC_ENABLE;
  553. mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
  554. }
  555. /* Start the Ethernet port RX and TX activity */
  556. static void mvneta_port_up(struct mvneta_port *pp)
  557. {
  558. int queue;
  559. u32 q_map;
  560. /* Enable all initialized TXs. */
  561. mvneta_mib_counters_clear(pp);
  562. q_map = 0;
  563. for (queue = 0; queue < txq_number; queue++) {
  564. struct mvneta_tx_queue *txq = &pp->txqs[queue];
  565. if (txq->descs != NULL)
  566. q_map |= (1 << queue);
  567. }
  568. mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
  569. /* Enable all initialized RXQs. */
  570. q_map = 0;
  571. for (queue = 0; queue < rxq_number; queue++) {
  572. struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
  573. if (rxq->descs != NULL)
  574. q_map |= (1 << queue);
  575. }
  576. mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
  577. }
  578. /* Stop the Ethernet port activity */
  579. static void mvneta_port_down(struct mvneta_port *pp)
  580. {
  581. u32 val;
  582. int count;
  583. /* Stop Rx port activity. Check port Rx activity. */
  584. val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
  585. /* Issue stop command for active channels only */
  586. if (val != 0)
  587. mvreg_write(pp, MVNETA_RXQ_CMD,
  588. val << MVNETA_RXQ_DISABLE_SHIFT);
  589. /* Wait for all Rx activity to terminate. */
  590. count = 0;
  591. do {
  592. if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
  593. netdev_warn(pp->dev,
  594. "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
  595. val);
  596. break;
  597. }
  598. mdelay(1);
  599. val = mvreg_read(pp, MVNETA_RXQ_CMD);
  600. } while (val & 0xff);
  601. /* Stop Tx port activity. Check port Tx activity. Issue stop
  602. * command for active channels only
  603. */
  604. val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
  605. if (val != 0)
  606. mvreg_write(pp, MVNETA_TXQ_CMD,
  607. (val << MVNETA_TXQ_DISABLE_SHIFT));
  608. /* Wait for all Tx activity to terminate. */
  609. count = 0;
  610. do {
  611. if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
  612. netdev_warn(pp->dev,
  613. "TIMEOUT for TX stopped status=0x%08x\n",
  614. val);
  615. break;
  616. }
  617. mdelay(1);
  618. /* Check TX Command reg that all Txqs are stopped */
  619. val = mvreg_read(pp, MVNETA_TXQ_CMD);
  620. } while (val & 0xff);
  621. /* Double check to verify that TX FIFO is empty */
  622. count = 0;
  623. do {
  624. if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
  625. netdev_warn(pp->dev,
  626. "TX FIFO empty timeout status=0x08%x\n",
  627. val);
  628. break;
  629. }
  630. mdelay(1);
  631. val = mvreg_read(pp, MVNETA_PORT_STATUS);
  632. } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
  633. (val & MVNETA_TX_IN_PRGRS));
  634. udelay(200);
  635. }
  636. /* Enable the port by setting the port enable bit of the MAC control register */
  637. static void mvneta_port_enable(struct mvneta_port *pp)
  638. {
  639. u32 val;
  640. /* Enable port */
  641. val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
  642. val |= MVNETA_GMAC0_PORT_ENABLE;
  643. mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
  644. }
  645. /* Disable the port and wait about 200 usec before returning */
  646. static void mvneta_port_disable(struct mvneta_port *pp)
  647. {
  648. u32 val;
  649. /* Reset the Enable bit in the Serial Control Register */
  650. val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
  651. val &= ~MVNETA_GMAC0_PORT_ENABLE;
  652. mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
  653. udelay(200);
  654. }
  655. /* Multicast tables methods */
  656. /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
  657. static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
  658. {
  659. int offset;
  660. u32 val;
  661. if (queue == -1) {
  662. val = 0;
  663. } else {
  664. val = 0x1 | (queue << 1);
  665. val |= (val << 24) | (val << 16) | (val << 8);
  666. }
  667. for (offset = 0; offset <= 0xc; offset += 4)
  668. mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
  669. }
  670. /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
  671. static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
  672. {
  673. int offset;
  674. u32 val;
  675. if (queue == -1) {
  676. val = 0;
  677. } else {
  678. val = 0x1 | (queue << 1);
  679. val |= (val << 24) | (val << 16) | (val << 8);
  680. }
  681. for (offset = 0; offset <= 0xfc; offset += 4)
  682. mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
  683. }
  684. /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
  685. static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
  686. {
  687. int offset;
  688. u32 val;
  689. if (queue == -1) {
  690. memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
  691. val = 0;
  692. } else {
  693. memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
  694. val = 0x1 | (queue << 1);
  695. val |= (val << 24) | (val << 16) | (val << 8);
  696. }
  697. for (offset = 0; offset <= 0xfc; offset += 4)
  698. mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
  699. }
  700. /* This method sets defaults to the NETA port:
  701. * Clears interrupt Cause and Mask registers.
  702. * Clears all MAC tables.
  703. * Sets defaults to all registers.
  704. * Resets RX and TX descriptor rings.
  705. * Resets PHY.
  706. * This method can be called after mvneta_port_down() to return the port
  707. * settings to defaults.
  708. */
  709. static void mvneta_defaults_set(struct mvneta_port *pp)
  710. {
  711. int cpu;
  712. int queue;
  713. u32 val;
  714. /* Clear all Cause registers */
  715. mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
  716. mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
  717. mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
  718. /* Mask all interrupts */
  719. mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
  720. mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
  721. mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
  722. mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
  723. /* Enable MBUS Retry bit16 */
  724. mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
  725. /* Set CPU queue access map - all CPUs have access to all RX
  726. * queues and to all TX queues
  727. */
  728. for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
  729. mvreg_write(pp, MVNETA_CPU_MAP(cpu),
  730. (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
  731. MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
  732. /* Reset RX and TX DMAs */
  733. mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
  734. mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
  735. /* Disable Legacy WRR, Disable EJP, Release from reset */
  736. mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
  737. for (queue = 0; queue < txq_number; queue++) {
  738. mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
  739. mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
  740. }
  741. mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
  742. mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
  743. /* Set Port Acceleration Mode */
  744. val = MVNETA_ACC_MODE_EXT;
  745. mvreg_write(pp, MVNETA_ACC_MODE, val);
  746. /* Update val of portCfg register accordingly with all RxQueue types */
  747. val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
  748. mvreg_write(pp, MVNETA_PORT_CONFIG, val);
  749. val = 0;
  750. mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
  751. mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
  752. /* Build PORT_SDMA_CONFIG_REG */
  753. val = 0;
  754. /* Default burst size */
  755. val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
  756. val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
  757. val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
  758. MVNETA_NO_DESC_SWAP);
  759. /* Assign port SDMA configuration */
  760. mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
  761. mvneta_set_ucast_table(pp, -1);
  762. mvneta_set_special_mcast_table(pp, -1);
  763. mvneta_set_other_mcast_table(pp, -1);
  764. /* Set port interrupt enable register - default enable all */
  765. mvreg_write(pp, MVNETA_INTR_ENABLE,
  766. (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
  767. | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
  768. }
  769. /* Set max sizes for tx queues */
  770. static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
  771. {
  772. u32 val, size, mtu;
  773. int queue;
  774. mtu = max_tx_size * 8;
  775. if (mtu > MVNETA_TX_MTU_MAX)
  776. mtu = MVNETA_TX_MTU_MAX;
  777. /* Set MTU */
  778. val = mvreg_read(pp, MVNETA_TX_MTU);
  779. val &= ~MVNETA_TX_MTU_MAX;
  780. val |= mtu;
  781. mvreg_write(pp, MVNETA_TX_MTU, val);
  782. /* TX token size and all TXQs token size must be larger than MTU */
  783. val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
  784. size = val & MVNETA_TX_TOKEN_SIZE_MAX;
  785. if (size < mtu) {
  786. size = mtu;
  787. val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
  788. val |= size;
  789. mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
  790. }
  791. for (queue = 0; queue < txq_number; queue++) {
  792. val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
  793. size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
  794. if (size < mtu) {
  795. size = mtu;
  796. val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
  797. val |= size;
  798. mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
  799. }
  800. }
  801. }
  802. /* Set unicast address */
  803. static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
  804. int queue)
  805. {
  806. unsigned int unicast_reg;
  807. unsigned int tbl_offset;
  808. unsigned int reg_offset;
  809. /* Locate the Unicast table entry */
  810. last_nibble = (0xf & last_nibble);
  811. /* offset from unicast tbl base */
  812. tbl_offset = (last_nibble / 4) * 4;
  813. /* offset within the above reg */
  814. reg_offset = last_nibble % 4;
  815. unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
  816. if (queue == -1) {
  817. /* Clear accepts frame bit at specified unicast DA tbl entry */
  818. unicast_reg &= ~(0xff << (8 * reg_offset));
  819. } else {
  820. unicast_reg &= ~(0xff << (8 * reg_offset));
  821. unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
  822. }
  823. mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
  824. }
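/* Worked example (illustrative): a MAC address whose last nibble is 5
 * yields tbl_offset = (5 / 4) * 4 = 4 and reg_offset = 5 % 4 = 1, so
 * byte 1 of the second 32-bit unicast filter register holds its
 * "accept to queue" entry.
 */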
  825. /* Set mac address */
  826. static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
  827. int queue)
  828. {
  829. unsigned int mac_h;
  830. unsigned int mac_l;
  831. if (queue != -1) {
  832. mac_l = (addr[4] << 8) | (addr[5]);
  833. mac_h = (addr[0] << 24) | (addr[1] << 16) |
  834. (addr[2] << 8) | (addr[3] << 0);
  835. mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
  836. mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
  837. }
  838. /* Accept frames of this address */
  839. mvneta_set_ucast_addr(pp, addr[5], queue);
  840. }
  841. /* Set the number of packets that will be received before an RX
  842. * interrupt is generated by the HW.
  843. */
  844. static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
  845. struct mvneta_rx_queue *rxq, u32 value)
  846. {
  847. mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
  848. value | MVNETA_RXQ_NON_OCCUPIED(0));
  849. rxq->pkts_coal = value;
  850. }
  851. /* Set the time delay in usec before an RX interrupt is generated
  852. * by the HW.
  853. */
  854. static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
  855. struct mvneta_rx_queue *rxq, u32 value)
  856. {
  857. u32 val;
  858. unsigned long clk_rate;
  859. clk_rate = clk_get_rate(pp->clk);
  860. val = (clk_rate / 1000000) * value;
  861. mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
  862. rxq->time_coal = value;
  863. }
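/* Worked example (illustrative, assuming a 250 MHz core clock): a
 * requested delay of 100 usec is written to the time-coalescing
 * register as (250000000 / 1000000) * 100 = 25000 clock cycles.
 */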
  864. /* Set threshold for TX_DONE pkts coalescing */
  865. static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
  866. struct mvneta_tx_queue *txq, u32 value)
  867. {
  868. u32 val;
  869. val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
  870. val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
  871. val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
  872. mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
  873. txq->done_pkts_coal = value;
  874. }
  875. /* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
  876. static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
  877. {
  878. if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
  879. pp->tx_done_timer.expires = jiffies +
  880. msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
  881. add_timer(&pp->tx_done_timer);
  882. }
  883. }
  884. /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
  885. static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
  886. u32 phys_addr, u32 cookie)
  887. {
  888. rx_desc->buf_cookie = cookie;
  889. rx_desc->buf_phys_addr = phys_addr;
  890. }
  891. /* Decrement sent descriptors counter */
  892. static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
  893. struct mvneta_tx_queue *txq,
  894. int sent_desc)
  895. {
  896. u32 val;
  897. /* Only 255 TX descriptors can be updated at once */
  898. while (sent_desc > 0xff) {
  899. val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
  900. mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
  901. sent_desc = sent_desc - 0xff;
  902. }
  903. val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
  904. mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
  905. }
  906. /* Get number of TX descriptors already sent by HW */
  907. static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
  908. struct mvneta_tx_queue *txq)
  909. {
  910. u32 val;
  911. int sent_desc;
  912. val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
  913. sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
  914. MVNETA_TXQ_SENT_DESC_SHIFT;
  915. return sent_desc;
  916. }
  917. /* Get number of sent descriptors and decrement counter.
  918. * The number of sent descriptors is returned.
  919. */
  920. static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
  921. struct mvneta_tx_queue *txq)
  922. {
  923. int sent_desc;
  924. /* Get number of sent descriptors */
  925. sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
  926. /* Decrement sent descriptors counter */
  927. if (sent_desc)
  928. mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
  929. return sent_desc;
  930. }
  931. /* Set TXQ descriptors fields relevant for CSUM calculation */
  932. static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
  933. int ip_hdr_len, int l4_proto)
  934. {
  935. u32 command;
  936. /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
  937. * G_L4_chk, L4_type; required only for checksum
  938. * calculation
  939. */
  940. command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
  941. command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
  942. if (l3_proto == swab16(ETH_P_IP))
  943. command |= MVNETA_TXD_IP_CSUM;
  944. else
  945. command |= MVNETA_TX_L3_IP6;
  946. if (l4_proto == IPPROTO_TCP)
  947. command |= MVNETA_TX_L4_CSUM_FULL;
  948. else if (l4_proto == IPPROTO_UDP)
  949. command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
  950. else
  951. command |= MVNETA_TX_L4_CSUM_NOT;
  952. return command;
  953. }
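/* Example (illustrative): for an IPv4/TCP frame, mvneta_skb_tx_csum()
 * below passes l3_offs = skb_network_offset(skb), l3_proto =
 * skb->protocol, ip_hdr_len = ip4h->ihl and l4_proto = ip4h->protocol,
 * so the returned command carries MVNETA_TXD_IP_CSUM and
 * MVNETA_TX_L4_CSUM_FULL in addition to the offset/length fields.
 */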
  954. /* Display more error info */
  955. static void mvneta_rx_error(struct mvneta_port *pp,
  956. struct mvneta_rx_desc *rx_desc)
  957. {
  958. u32 status = rx_desc->status;
  959. if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
  960. netdev_err(pp->dev,
  961. "bad rx status %08x (buffer oversize), size=%d\n",
  962. rx_desc->status, rx_desc->data_size);
  963. return;
  964. }
  965. switch (status & MVNETA_RXD_ERR_CODE_MASK) {
  966. case MVNETA_RXD_ERR_CRC:
  967. netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
  968. status, rx_desc->data_size);
  969. break;
  970. case MVNETA_RXD_ERR_OVERRUN:
  971. netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
  972. status, rx_desc->data_size);
  973. break;
  974. case MVNETA_RXD_ERR_LEN:
  975. netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
  976. status, rx_desc->data_size);
  977. break;
  978. case MVNETA_RXD_ERR_RESOURCE:
  979. netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
  980. status, rx_desc->data_size);
  981. break;
  982. }
  983. }
  984. /* Handle RX checksum offload */
  985. static void mvneta_rx_csum(struct mvneta_port *pp,
  986. struct mvneta_rx_desc *rx_desc,
  987. struct sk_buff *skb)
  988. {
  989. if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
  990. (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
  991. skb->csum = 0;
  992. skb->ip_summed = CHECKSUM_UNNECESSARY;
  993. return;
  994. }
  995. skb->ip_summed = CHECKSUM_NONE;
  996. }
  997. /* Return tx queue pointer (find last set bit) according to causeTxDone reg */
  998. static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
  999. u32 cause)
  1000. {
  1001. int queue = fls(cause) - 1;
  1002. return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
  1003. }
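/* Illustrative note: fls() returns the position of the highest set
 * bit, so e.g. cause = 0x06 (TX-done pending on queues 1 and 2)
 * selects queue fls(0x06) - 1 = 2 first.
 */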
  1004. /* Free tx queue skbuffs */
  1005. static void mvneta_txq_bufs_free(struct mvneta_port *pp,
  1006. struct mvneta_tx_queue *txq, int num)
  1007. {
  1008. int i;
  1009. for (i = 0; i < num; i++) {
  1010. struct mvneta_tx_desc *tx_desc = txq->descs +
  1011. txq->txq_get_index;
  1012. struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
  1013. mvneta_txq_inc_get(txq);
  1014. if (!skb)
  1015. continue;
  1016. dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
  1017. tx_desc->data_size, DMA_TO_DEVICE);
  1018. dev_kfree_skb_any(skb);
  1019. }
  1020. }
  1021. /* Handle end of transmission */
  1022. static int mvneta_txq_done(struct mvneta_port *pp,
  1023. struct mvneta_tx_queue *txq)
  1024. {
  1025. struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
  1026. int tx_done;
  1027. tx_done = mvneta_txq_sent_desc_proc(pp, txq);
  1028. if (tx_done == 0)
  1029. return tx_done;
  1030. mvneta_txq_bufs_free(pp, txq, tx_done);
  1031. txq->count -= tx_done;
  1032. if (netif_tx_queue_stopped(nq)) {
  1033. if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
  1034. netif_tx_wake_queue(nq);
  1035. }
  1036. return tx_done;
  1037. }
  1038. /* Refill processing */
  1039. static int mvneta_rx_refill(struct mvneta_port *pp,
  1040. struct mvneta_rx_desc *rx_desc)
  1041. {
  1042. dma_addr_t phys_addr;
  1043. struct sk_buff *skb;
  1044. skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
  1045. if (!skb)
  1046. return -ENOMEM;
  1047. phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
  1048. MVNETA_RX_BUF_SIZE(pp->pkt_size),
  1049. DMA_FROM_DEVICE);
  1050. if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
  1051. dev_kfree_skb(skb);
  1052. return -ENOMEM;
  1053. }
  1054. mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
  1055. return 0;
  1056. }
  1057. /* Handle tx checksum */
  1058. static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
  1059. {
  1060. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  1061. int ip_hdr_len = 0;
  1062. u8 l4_proto;
  1063. if (skb->protocol == htons(ETH_P_IP)) {
  1064. struct iphdr *ip4h = ip_hdr(skb);
  1065. /* Calculate IPv4 checksum and L4 checksum */
  1066. ip_hdr_len = ip4h->ihl;
  1067. l4_proto = ip4h->protocol;
  1068. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  1069. struct ipv6hdr *ip6h = ipv6_hdr(skb);
  1070. /* Read l4_protocol from one of IPv6 extra headers */
  1071. if (skb_network_header_len(skb) > 0)
  1072. ip_hdr_len = (skb_network_header_len(skb) >> 2);
  1073. l4_proto = ip6h->nexthdr;
  1074. } else
  1075. return MVNETA_TX_L4_CSUM_NOT;
  1076. return mvneta_txq_desc_csum(skb_network_offset(skb),
  1077. skb->protocol, ip_hdr_len, l4_proto);
  1078. }
  1079. return MVNETA_TX_L4_CSUM_NOT;
  1080. }
  1081. /* Returns rx queue pointer (find last set bit) according to causeRxTx
  1082. * value
  1083. */
  1084. static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
  1085. u32 cause)
  1086. {
  1087. int queue = fls(cause >> 8) - 1;
  1088. return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
  1089. }
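/* Illustrative note: the RX queue bits occupy positions 8..15 of the
 * cause value (see MVNETA_RX_INTR_MASK), hence the >> 8 before
 * fls(); e.g. a cause of BIT(8) maps to RX queue 0.
 */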
  1090. /* Drop packets received by the RXQ and free buffers */
  1091. static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
  1092. struct mvneta_rx_queue *rxq)
  1093. {
  1094. int rx_done, i;
  1095. rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
  1096. for (i = 0; i < rxq->size; i++) {
  1097. struct mvneta_rx_desc *rx_desc = rxq->descs + i;
  1098. struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
		dev_kfree_skb_any(skb);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		u32 rx_status;
		int rx_bytes, err;

		prefetch(rx_desc);
		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		skb = (struct sk_buff *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
					    (u32)skb);
			continue;
		}

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);

		rx_bytes = rx_desc->data_size -
			(ETH_FCS_LEN + MVNETA_MH_SIZE);
		u64_stats_update_begin(&pp->rx_stats.syncp);
		pp->rx_stats.packets++;
		pp->rx_stats.bytes += rx_bytes;
		u64_stats_update_end(&pp->rx_stats.syncp);

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_desc, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(pp->dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

			txq->tx_skb[txq->txq_put_index] = skb;
			mvneta_txq_inc_put(txq);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;

			txq->tx_skb[txq->txq_put_index] = NULL;
			mvneta_txq_inc_put(txq);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
	struct mvneta_tx_desc *tx_desc;
	struct netdev_queue *nq;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	frags = skb_shinfo(skb)->nr_frags + 1;
	nq = netdev_get_tx_queue(dev, txq_def);

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		u64_stats_update_begin(&pp->tx_stats.syncp);
		pp->tx_stats.packets++;
		pp->tx_stats.bytes += skb->len;
		u64_stats_update_end(&pp->tx_stats.syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
		mvneta_txq_done(pp, txq);

	/* If after calling mvneta_txq_done, count equals
	 * frags, we need to set the timer
	 */
	if (txq->count == frags && frags > 0)
		mvneta_add_tx_done_timer(pp);

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* handle tx done - called from tx done timer callback */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
			      int *tx_todo)
{
	struct mvneta_tx_queue *txq;
	u32 tx_done = 0;
	struct netdev_queue *nq;

	*tx_todo = 0;
	while (cause_tx_done != 0) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
		if (!txq)
			break;

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count) {
			tx_done += mvneta_txq_done(pp, txq);
			*tx_todo += txq->count;
		}

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}

	return tx_done;
}

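/* Note on the CRC helper below: the inner loop reduces the shifted value
 * with the polynomial 0x107 (x^8 + x^2 + x + 1), one address byte per
 * outer iteration.
 */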
/* Compute the CRC-8 of the specified address, using an algorithm that is
 * specific to this hardware (per the hw spec) and differs from the generic
 * CRC-8 algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

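/* Worked example of the table indexing used below (derived from the code):
 * for a DA whose last byte is 0x42, tbl_offset = 0x42 / 4 = 16, so the entry
 * lives in register MVNETA_DA_FILT_SPEC_MCAST + 64, and reg_offset =
 * 0x42 % 4 = 2, i.e. byte 2 of that register.
 */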
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC addresses of
 * the form 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the appropriate Other Multicast Table entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4; /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 * 1) Special Multicast Table for MAC addresses of the form
 *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *    Table entries in the DA-Filter table.
 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
 *    is used as an index to the Other Multicast Table entries in the
 *    DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* NAPI handler
 * Bits 0-7 of the causeRxTx register indicate that packets were transmitted
 * on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8-15 of the causeRxTx register indicate that packets were received
 * on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	unsigned long flags;
	struct mvneta_port *pp = netdev_priv(napi->dev);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
		MVNETA_RX_INTR_MASK(rxq_number);

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= pp->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx != 0) && (budget > 0)) {
			int count;
			struct mvneta_rx_queue *rxq;

			/* get rx queue number from cause_rx_tx */
			rxq = mvneta_rx_policy(pp, cause_rx_tx);
			if (!rxq)
				break;

			/* process the packet in that rx queue */
			count = mvneta_rx(pp, budget, rxq);
			rx_done += count;
			budget -= count;
			if (budget > 0) {
				/* clear the bit of this rx queue in the
				 * cause rx tx register, so that the next
				 * iteration will find the next rx queue
				 * where packets were received
				 */
				cause_rx_tx &= ~((1 << rxq->id) << 8);
			}
		}
	} else {
		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
		budget -= rx_done;
	}

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(napi);
		local_irq_save(flags);
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number));
		local_irq_restore(flags);
	}

	pp->cause_rx_tx = cause_rx_tx;

	return rx_done;
}

/* tx done timer callback */
static void mvneta_tx_done_timer_callback(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvneta_port *pp = netdev_priv(dev);
	int tx_done = 0, tx_todo = 0;

	if (!netif_running(dev))
		return;

	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	tx_done = mvneta_tx_done_gbe(pp,
				     (((1 << txq_number) - 1) &
				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
				     &tx_todo);
	if (tx_todo > 0)
		mvneta_add_tx_done_timer(pp);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	struct net_device *dev = pp->dev;
	int i;

	for (i = 0; i < num; i++) {
		struct sk_buff *skb;
		struct mvneta_rx_desc *rx_desc;
		unsigned long phys_addr;

		skb = dev_alloc_skb(pp->pkt_size);
		if (!skb) {
			netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}

		rx_desc = rxq->descs + i;
		memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
		phys_addr = dma_map_single(dev->dev.parent, skb->head,
					   MVNETA_RX_BUF_SIZE(pp->pkt_size),
					   DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
			dev_kfree_skb(skb);
			break;
		}

		mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the hal tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

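/* Reset the RX path of the port by toggling the RX DMA reset bit */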
static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL) {
		netdev_err(pp->dev,
			   "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
			   rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
			   rxq->size);
		return -ENOMEM;
	}

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL) {
		netdev_err(pp->dev,
			   "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
			   txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
			   txq->size);
		return -ENOMEM;
	}

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}

/* Free all the resources used by a TX queue */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

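/* Start the port: set the maximum RX/TX sizes, enable RX/TX and NAPI,
 * unmask the RX interrupts, start the PHY and wake up the TX queues.
 */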
static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	napi_enable(&pp->napi);

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number));

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}

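/* Stop the port: stop the PHY and NAPI, stop the TX queues, disable the
 * port, mask and clear all its interrupts, then reset the RX/TX paths.
 */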
static void mvneta_stop_dev(struct mvneta_port *pp)
{
	phy_stop(pp->phy_dev);

	napi_disable(&pp->napi);

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

/* tx timeout callback - display a message and stop/start the network device */
static void mvneta_tx_timeout(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");
	mvneta_stop_dev(pp);
	mvneta_start_dev(pp);
}

/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev))
		return 0;

	/* The interface is running, so we have to force a
	 * reallocation of the RXQs
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	mvneta_setup_txqs(pp);

	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	return 0;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u8 *mac = addr + 2;
	int i;

	if (netif_running(dev))
		return -EBUSY;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, mac, rxq_def);

	/* Set addr in the device */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = mac[i];

	return 0;
}

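/* PHY link adjustment callback: propagate speed/duplex changes to the GMAC
 * autoneg configuration and bring the port up or down when the link state
 * changes.
 */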
static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
			netdev_info(pp->dev, "link up\n");
		} else {
			mvneta_port_down(pp);
			netdev_info(pp->dev, "link down\n");
		}
	}
}

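/* Connect to the PHY described in the device tree and register
 * mvneta_adjust_link() as the link state callback.
 */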
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link = 0;
	pp->duplex = 0;
	pp->speed = 0;

	return 0;
}

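/* Disconnect from the PHY */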
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}

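/* ndo_open callback: program the MAC address, allocate the RX/TX queues,
 * request the port IRQ, connect to the PHY and start the port.
 */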
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  MVNETA_DRIVER_NAME, pp);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* The link is down by default */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	free_irq(dev->irq, pp);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);
	del_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	return 0;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;

	return 0;
}

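/* Report driver name, version and bus info for ethtool */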
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

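/* Report the current and maximum RX/TX ring sizes for ethtool */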
static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

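/* Change the RX/TX ring sizes (clamped to the hardware maximums); restart
 * the interface if it is currently running.
 */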
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;
	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
		ring->tx_pending : MVNETA_MAX_TXD;

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open = mvneta_open,
	.ndo_stop = mvneta_stop,
	.ndo_start_xmit = mvneta_tx,
	.ndo_set_rx_mode = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu = mvneta_change_mtu,
	.ndo_tx_timeout = mvneta_tx_timeout,
	.ndo_get_stats64 = mvneta_get_stats64,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link = ethtool_op_get_link,
	.get_settings = mvneta_ethtool_get_settings,
	.set_settings = mvneta_ethtool_set_settings,
	.set_coalesce = mvneta_ethtool_set_coalesce,
	.get_coalesce = mvneta_ethtool_get_coalesce,
	.get_drvinfo = mvneta_ethtool_get_drvinfo,
	.get_ringparam = mvneta_ethtool_get_ringparam,
	.set_ringparam = mvneta_ethtool_set_ringparam,
};

/* Initialize hw */
static int mvneta_init(struct mvneta_port *pp, int phy_addr)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}

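/* Free the RX/TX queue arrays allocated by mvneta_init() */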
static void mvneta_deinit(struct mvneta_port *pp)
{
	kfree(pp->txqs);
	kfree(pp->rxqs);
}

/* platform glue: initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}

/* Power up the port */
static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 val;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
		mvneta_port_sgmii_config(pp);

	mvneta_gmac_rgmii_set(pp, 1);

	/* Cancel Port Reset */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	u32 phy_addr;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *mac_addr;
	int phy_mode;
	int err;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "no associated PHY\n");
		err = -ENODEV;
		goto err_free_irq;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	mac_addr = of_get_mac_address(dn);

	if (!mac_addr || !is_valid_ether_addr(mac_addr))
		eth_hw_addr_random(dev);
	else
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

	pp = netdev_priv(dev);

	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
	init_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	pp->weight = MVNETA_RX_POLL_WEIGHT;
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->base = of_iomap(dn, 0);
	if (pp->base == NULL) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_unmap;
	}

	clk_prepare_enable(pp->clk);

	pp->tx_done_timer.data = (unsigned long)dev;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(pp, phy_addr);
	if (err < 0) {
		dev_err(&pdev->dev, "can't init eth hal\n");
		goto err_clk;
	}
	mvneta_port_power_up(pp, phy_mode);

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_deinit;
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->priv_flags |= IFF_UNICAST_FLT;

	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_deinit:
	mvneta_deinit(pp);
err_clk:
	clk_disable_unprepare(pp->clk);
err_unmap:
	iounmap(pp->base);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	mvneta_deinit(pp);
	clk_disable_unprepare(pp->clk);
	iounmap(pp->base);
	irq_dispose_mapping(dev->irq);
	free_netdev(dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

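/* Module parameters: number of RX/TX queues and the default RX/TX queue
 * indexes (note that mvneta_probe() currently rejects rxq_def != 0).
 */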
module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
module_param(txq_def, int, S_IRUGO);