/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
					  MVNETA_DEF_RXQ_ARP(q) | \
					  MVNETA_DEF_RXQ_TCP(q) | \
					  MVNETA_DEF_RXQ_UDP(q) | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_NO_DESC_SWAP 0x0
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SGMII_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK 0x25a4
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4
#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
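/* Editor's note, a worked example that is not part of the original
 * driver: with a ring of 128 descriptors, last_desc is 127, so
 * MVNETA_QUEUE_NEXT_DESC(q, 126) yields 127 and
 * MVNETA_QUEUE_NEXT_DESC(q, 127) yields 0: the index wraps back to
 * the start of the ring instead of running past the array.
 */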
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 16
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* Timer */
#define MVNETA_TX_DONE_TIMER_PERIOD 10

/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT 64

/* The two bytes Marvell header. It either contains a special value
 * used by Marvell switches when a specific hardware mode is enabled
 * (not supported by this driver), or is filled with zeroes
 * automatically by the hardware on the RX side. Since those two bytes
 * sit at the front of the Ethernet header, they leave the IP header
 * aligned on a 4-byte boundary automatically: the hardware skips them
 * on its own.
 */
#define MVNETA_MH_SIZE 2
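/* Editor's note, a worked example that is not part of the original
 * driver: a standard Ethernet header is ETH_HLEN = 14 bytes, so with
 * the two Marvell header bytes in front of it the IP header starts at
 * offset 16, a 4-byte boundary. The RX path below relies on this and
 * simply strips the header with skb_reserve(skb, MVNETA_MH_SIZE).
 */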
#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
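/* Editor's note, a worked example that is not part of the original
 * driver: for an MTU of 1500, MVNETA_RX_PKT_SIZE(1500) is
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = ALIGN(1524, 32) = 1536 bytes,
 * and MVNETA_RX_BUF_SIZE(1536) adds NET_SKB_PAD bytes of headroom on
 * top of that.
 */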
struct mvneta_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

struct mvneta_port {
	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct timer_list tx_done_timer;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Flags */
	unsigned long flags;
#define MVNETA_F_TX_DONE_TIMER_BIT 0

	/* Napi weight */
	int weight;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_stats tx_stats;
	struct mvneta_stats rx_stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; their layout is
 * therefore dictated by the hardware design.
 */
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmission */
#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
			     MVNETA_TXD_L_DESC | \
			     MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};
struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)

	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */
	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}
/* Get System Network Statistics */
static struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));

	do {
		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
		stats->rx_packets = pp->rx_stats.packets;
		stats->rx_bytes = pp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
		stats->tx_packets = pp->tx_stats.packets;
		stats->tx_bytes = pp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
/* Rx descriptors helper methods */

/* Checks whether the given RX descriptor is both the first and the
 * last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so an RX descriptor that
 * does not have both its first and last bits set indicates an error.
 */
static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
{
	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
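/* Editor's note, a worked example that is not part of the original
 * driver: for ndescs = 300, the loop above first writes 255 (the
 * per-write hardware maximum) to the status update register, and the
 * final write then adds the remaining 45 descriptors.
 */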
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of processed and newly filled RX descriptors;
 * called upon return from the RX path or from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}

		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}

		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
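/* Editor's note, a worked example that is not part of the original
 * driver: for max_rx_size = 1518 (a standard Ethernet frame), the
 * field written above is (1518 - 2) / 2 = 758, i.e. the hardware
 * holds the limit in units of two bytes, not counting the Marvell
 * header.
 */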
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes, hence the offset >> 3 */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the
	 * caller processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	if (enable)
		val |= MVNETA_GMAC2_PORT_RGMII;
	else
		val &= ~MVNETA_GMAC2_PORT_RGMII;

	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val |= MVNETA_GMAC2_PSC_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);
	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
/* This method sets default values for the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);

	val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
		MVNETA_NO_DESC_SWAP);

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before an RX
 * interrupt is generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before an RX interrupt is generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
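/* Editor's note, a worked example that is not part of the original
 * driver and assumes a 250 MHz core clock: clk_rate / 1000000 gives
 * 250 ticks per usec, so value = 100 usec is written to
 * MVNETA_RXQ_TIME_COAL_REG as 25000 clock cycles.
 */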
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
{
	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
		pp->tx_done_timer.expires = jiffies +
			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
		add_timer(&pp->tx_done_timer);
	}
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == swab16(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
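/* Editor's usage sketch, not part of the original driver: builds the
 * command word for a TCP-over-IPv4 frame with a standard 14-byte
 * Ethernet header and a 20-byte IP header (ihl = 5, counted in 32-bit
 * words). This hypothetical helper only illustrates the call; the
 * real caller is mvneta_skb_tx_csum() further down.
 */
static inline u32 mvneta_example_tcp4_csum_cmd(void)
{
	return mvneta_txq_desc_csum(ETH_HLEN, swab16(ETH_P_IP),
				    5, IPPROTO_TCP);
}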
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   rx_desc->status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvneta_rx_csum(struct mvneta_port *pp,
			   struct mvneta_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
}
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!skb)
			continue;

		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static int mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (tx_done == 0)
		return tx_done;
	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
	}

	return tx_done;
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
	if (!skb)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);

	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}
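/* Editor's note, a worked example that is not part of the original
 * driver: fls() returns the position of the most significant set bit,
 * counting from 1, so for cause = 0x5 (queues 0 and 2 pending)
 * fls(0x5) - 1 = 2 and the highest-numbered pending queue is served
 * first. The RX cause bits live at positions 8..15 of the register,
 * hence the cause >> 8 above before the same computation.
 */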
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;

		dev_kfree_skb_any(skb);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		u32 rx_status;
		int rx_bytes, err;

		prefetch(rx_desc);
		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		skb = (struct sk_buff *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
					    (u32)skb);
			continue;
		}

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);

		rx_bytes = rx_desc->data_size -
			(ETH_FCS_LEN + MVNETA_MH_SIZE);
		u64_stats_update_begin(&pp->rx_stats.syncp);
		pp->rx_stats.packets++;
		pp->rx_stats.bytes += rx_bytes;
		u64_stats_update_end(&pp->rx_stats.syncp);

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_desc, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(pp->dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

			txq->tx_skb[txq->txq_put_index] = skb;
			mvneta_txq_inc_put(txq);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;

			txq->tx_skb[txq->txq_put_index] = NULL;
			mvneta_txq_inc_put(txq);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	struct netdev_queue *nq;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	frags = skb_shinfo(skb)->nr_frags + 1;
	nq = netdev_get_tx_queue(dev, txq_id);

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		u64_stats_update_begin(&pp->tx_stats.syncp);
		pp->tx_stats.packets++;
		pp->tx_stats.bytes += skb->len;
		u64_stats_update_end(&pp->tx_stats.syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
		mvneta_txq_done(pp, txq);

	/* If after calling mvneta_txq_done, count equals
	 * frags, we need to set the timer
	 */
	if (txq->count == frags && frags > 0)
		mvneta_add_tx_done_timer(pp);

	return NETDEV_TX_OK;
}
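/* Tx completion above is coalesced: completed descriptors are only
 * reclaimed inline once at least MVNETA_TXDONE_COAL_PKTS of them are
 * pending. When the queue holds nothing but the frame just queued
 * (count == frags), the tx-done timer is armed instead, so the last
 * buffers still get reclaimed even with no further transmissions.
 */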
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* handle tx done - called from tx done timer callback */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
			      int *tx_todo)
{
	struct mvneta_tx_queue *txq;
	u32 tx_done = 0;
	struct netdev_queue *nq;

	*tx_todo = 0;
	while (cause_tx_done != 0) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
		if (!txq)
			break;

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count) {
			tx_done += mvneta_txq_done(pp, txq);
			*tx_todo += txq->count;
		}

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}

	return tx_done;
}
/* Compute crc8 of the specified address, using a unique algorithm
 * defined by the hardware spec, different from the generic crc8
 * algorithm.
 */
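/* The loop below is an MSB-first CRC over the six address bytes using
 * the polynomial 0x107 (x^8 + x^2 + x + 1), kept in a wider int so the
 * reduction can consume eight bits per iteration.
 */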
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
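/* Example of the indexing above: last_byte 0x2a selects the register at
 * MVNETA_DA_FILT_SPEC_MCAST + (0x2a / 4) * 4 = base + 40, byte lane
 * 0x2a % 4 = 2, i.e. bits 23:16 of that register. Each 32-bit filter
 * register thus packs four one-byte table entries.
 */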
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
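/* In both filter tables above, a table entry byte is encoded as
 * (0x01 | (queue << 1)): bit 0 marks the entry as valid (accept the
 * frame), and the receive queue number sits in the bits directly above
 * it.
 */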
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}
/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}
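/* The handler above masks every port interrupt before scheduling NAPI;
 * the RX interrupt mask is restored at the end of mvneta_poll() once
 * the budget is no longer exhausted. This is the usual interrupt
 * mitigation pattern for NAPI drivers.
 */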
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the cause Rx Tx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	unsigned long flags;
	struct mvneta_port *pp = netdev_priv(napi->dev);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
		MVNETA_RX_INTR_MASK(rxq_number);

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= pp->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx != 0) && (budget > 0)) {
			int count;
			struct mvneta_rx_queue *rxq;
			/* get rx queue number from cause_rx_tx */
			rxq = mvneta_rx_policy(pp, cause_rx_tx);
			if (!rxq)
				break;

			/* process the packet in that rx queue */
			count = mvneta_rx(pp, budget, rxq);
			rx_done += count;
			budget -= count;
			if (budget > 0) {
				/* set off the rx bit of the
				 * corresponding bit in the cause rx
				 * tx register, so that next iteration
				 * will find the next rx queue where
				 * packets are received on
				 */
				cause_rx_tx &= ~((1 << rxq->id) << 8);
			}
		}
	} else {
		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
		budget -= rx_done;
	}

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(napi);
		local_irq_save(flags);
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number));
		local_irq_restore(flags);
	}

	pp->cause_rx_tx = cause_rx_tx;

	return rx_done;
}
/* tx done timer callback */
static void mvneta_tx_done_timer_callback(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvneta_port *pp = netdev_priv(dev);
	int tx_done = 0, tx_todo = 0;

	if (!netif_running(dev))
		return;

	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	tx_done = mvneta_tx_done_gbe(pp,
				     (((1 << txq_number) - 1) &
				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
				     &tx_todo);
	if (tx_todo > 0)
		mvneta_add_tx_done_timer(pp);
}
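/* The mask passed above, (((1 << txq_number) - 1) &
 * MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK), builds a synthetic cause bitmap
 * covering every configured TXQ, so the timer callback drains all of
 * them rather than only queues flagged by an interrupt. If descriptors
 * are still outstanding afterwards, the timer is re-armed.
 */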
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	struct net_device *dev = pp->dev;
	int i;

	for (i = 0; i < num; i++) {
		struct sk_buff *skb;
		struct mvneta_rx_desc *rx_desc;
		unsigned long phys_addr;

		skb = dev_alloc_skb(pp->pkt_size);
		if (!skb) {
			netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}

		rx_desc = rxq->descs + i;
		memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
		phys_addr = dma_map_single(dev->dev.parent, skb->head,
					   MVNETA_RX_BUF_SIZE(pp->pkt_size),
					   DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
			dev_kfree_skb(skb);
			break;
		}

		mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}
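/* Note that buf_cookie stores the skb pointer cast to u32 (see the
 * mvneta_rx_desc_fill() calls above and the rx paths that cast it
 * back). This only works with 32-bit pointers, which holds on the
 * 32-bit Armada SoCs this driver targets.
 */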
/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the hal tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}
/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
}
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}
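/* txq->tx_skb is a shadow array with one slot per hardware descriptor;
 * the transmit path stores the skb pointer in the slot of a packet's
 * last descriptor (and NULL elsewhere), so tx-done processing knows
 * when a whole packet has been sent and its skb can be freed.
 */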
/* Free all resources allocated by mvneta_txq_init() */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	napi_enable(&pp->napi);

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number));

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	phy_stop(pp->phy_dev);

	napi_disable(&pp->napi);

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}
/* tx timeout callback - display a message and stop/start the network device */
static void mvneta_tx_timeout(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");
	mvneta_stop_dev(pp);
	mvneta_start_dev(pp);
}
/* Return a positive, possibly adjusted, MTU if it is valid, or a
 * negative errno otherwise
 */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
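/* The lower bound of 68 matches the minimum IPv4 link MTU required by
 * RFC 791; the upper bound of 9676 derives from the hardware's
 * 9700-byte frame limit, as the comment above notes.
 */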
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev))
		return 0;

	/* The interface is running, so we have to force a
	 * reallocation of the RXQs
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	mvneta_setup_txqs(pp);

	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	return 0;
}
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	/* addr points to a struct sockaddr; skip the 2-byte sa_family
	 * field to reach the MAC bytes in sa_data
	 */
	u8 *mac = addr + 2;
	int i;

	if (netif_running(dev))
		return -EBUSY;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, mac, rxq_def);

	/* Set addr in the device */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = mac[i];

	return 0;
}
static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed  = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
			netdev_info(pp->dev, "link up\n");
		} else {
			mvneta_port_down(pp);
			netdev_info(pp->dev, "link down\n");
		}
	}
}
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link    = 0;
	pp->duplex  = 0;
	pp->speed   = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  MVNETA_DRIVER_NAME, pp);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* By default, the link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	free_irq(dev->irq, pp);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);
	del_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	return 0;
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtool */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtool */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtool */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs       = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;

	return 0;
}
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;
	pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
		ring->tx_pending : MVNETA_MAX_TXD;

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_tx_timeout      = mvneta_tx_timeout,
	.ndo_get_stats64     = mvneta_get_stats64,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link       = ethtool_op_get_link,
	.get_settings   = mvneta_ethtool_get_settings,
	.set_settings   = mvneta_ethtool_set_settings,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam  = mvneta_ethtool_set_ringparam,
};
/* Initialize hw */
static int mvneta_init(struct mvneta_port *pp, int phy_addr)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}
static void mvneta_deinit(struct mvneta_port *pp)
{
	kfree(pp->txqs);
	kfree(pp->rxqs);
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
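/* The enable mask written above is apparently active-low: it starts at
 * 0x3f (all six windows disabled) and a bit is cleared for each
 * populated DRAM chip-select, so only windows backed by real memory end
 * up enabled. win_protect is accumulated here but not written back to a
 * register in this version of the function.
 */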
/* Power up the port */
static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 val;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
		mvneta_port_sgmii_config(pp);

	mvneta_gmac_rgmii_set(pp, 1);

	/* Cancel Port Reset */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	u32 phy_addr;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int phy_mode;
	int err;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		dev_err(&pdev->dev, "no associated PHY\n");
		err = -ENODEV;
		goto err_free_irq;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

	pp = netdev_priv(dev);

	pp->weight = MVNETA_RX_POLL_WEIGHT;
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(pp->clk);

	pp->base = of_iomap(dn, 0);
	if (pp->base == NULL) {
		err = -ENOMEM;
		goto err_clk;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	pp->tx_done_timer.data = (unsigned long)dev;
	pp->tx_done_timer.function = mvneta_tx_done_timer_callback;

	init_timer(&pp->tx_done_timer);
	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(pp, phy_addr);
	if (err < 0) {
		dev_err(&pdev->dev, "can't init eth hal\n");
		goto err_unmap;
	}
	mvneta_port_power_up(pp, phy_mode);

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_deinit;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_deinit:
	mvneta_deinit(pp);
err_unmap:
	iounmap(pp->base);
err_clk:
	clk_disable_unprepare(pp->clk);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	mvneta_deinit(pp);
	clk_disable_unprepare(pp->clk);
	iounmap(pp->base);
	irq_dispose_mapping(dev->irq);
	free_netdev(dev);

	return 0;
}
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);