netdevice.h

  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the Interfaces handler.
  7. *
  8. * Version: @(#)dev.h 1.0.10 08/12/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  13. * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
  14. * Alan Cox, <alan@lxorguk.ukuu.org.uk>
  15. * Bjorn Ekwall. <bj0rn@blox.se>
  16. * Pekka Riikonen <priikone@poseidon.pspt.fi>
  17. *
  18. * This program is free software; you can redistribute it and/or
  19. * modify it under the terms of the GNU General Public License
  20. * as published by the Free Software Foundation; either version
  21. * 2 of the License, or (at your option) any later version.
  22. *
  23. * Moved to /usr/include/linux for NET3
  24. */
  25. #ifndef _LINUX_NETDEVICE_H
  26. #define _LINUX_NETDEVICE_H
  27. #include <linux/if.h>
  28. #include <linux/if_ether.h>
  29. #include <linux/if_packet.h>
  30. #include <linux/if_link.h>
  31. #ifdef __KERNEL__
  32. #include <linux/pm_qos.h>
  33. #include <linux/timer.h>
  34. #include <linux/delay.h>
  35. #include <linux/atomic.h>
  36. #include <asm/cache.h>
  37. #include <asm/byteorder.h>
  38. #include <linux/device.h>
  39. #include <linux/percpu.h>
  40. #include <linux/rculist.h>
  41. #include <linux/dmaengine.h>
  42. #include <linux/workqueue.h>
  43. #include <linux/dynamic_queue_limits.h>
  44. #include <linux/ethtool.h>
  45. #include <net/net_namespace.h>
  46. #include <net/dsa.h>
  47. #ifdef CONFIG_DCB
  48. #include <net/dcbnl.h>
  49. #endif
  50. #include <net/netprio_cgroup.h>
  51. #include <linux/netdev_features.h>
  52. struct netpoll_info;
  53. struct phy_device;
  54. /* 802.11 specific */
  55. struct wireless_dev;
  56. /* source back-compat hooks */
  57. #define SET_ETHTOOL_OPS(netdev,ops) \
  58. ( (netdev)->ethtool_ops = (ops) )
  59. /* hardware address assignment types */
  60. #define NET_ADDR_PERM 0 /* address is permanent (default) */
  61. #define NET_ADDR_RANDOM 1 /* address is generated randomly */
  62. #define NET_ADDR_STOLEN 2 /* address is stolen from other device */
  63. /* Backlog congestion levels */
  64. #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
  65. #define NET_RX_DROP 1 /* packet dropped */
  66. /*
  67. * Transmit return codes: transmit return codes originate from three different
  68. * namespaces:
  69. *
  70. * - qdisc return codes
  71. * - driver transmit return codes
  72. * - errno values
  73. *
  74. * Drivers are allowed to return any one of those in their hard_start_xmit()
  75. * function. Real network devices commonly used with qdiscs should only return
  76. * the driver transmit return codes though - when qdiscs are used, the actual
  77. * transmission happens asynchronously, so the value is not propagated to
  78. * higher layers. Virtual network devices transmit synchronously, in this case
  79. * the driver transmit return codes are consumed by dev_queue_xmit(), all
  80. * others are propagated to higher layers.
  81. */
  82. /* qdisc ->enqueue() return codes. */
  83. #define NET_XMIT_SUCCESS 0x00
  84. #define NET_XMIT_DROP 0x01 /* skb dropped */
  85. #define NET_XMIT_CN 0x02 /* congestion notification */
  86. #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
  87. #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
  88. /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  89. * indicates that the device will soon be dropping packets, or already drops
  90. * some packets of the same priority; prompting us to send less aggressively. */
  91. #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
  92. #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
  93. /* Driver transmit return codes */
  94. #define NETDEV_TX_MASK 0xf0
  95. enum netdev_tx {
  96. __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
  97. NETDEV_TX_OK = 0x00, /* driver took care of packet */
  98. NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
  99. NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
  100. };
  101. typedef enum netdev_tx netdev_tx_t;
  102. /*
  103. * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  104. * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  105. */
  106. static inline bool dev_xmit_complete(int rc)
  107. {
  108. /*
  109. * Positive cases with an skb consumed by a driver:
  110. * - successful transmission (rc == NETDEV_TX_OK)
  111. * - error while transmitting (rc < 0)
  112. * - error while queueing to a different device (rc & NET_XMIT_MASK)
  113. */
  114. if (likely(rc < NET_XMIT_MASK))
  115. return true;
  116. return false;
  117. }
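/*
 * A minimal usage sketch (not part of the original header): a hypothetical
 * transmit helper calls the driver's hard_start_xmit and then uses
 * dev_xmit_complete() to decide whether the skb is still its responsibility.
 * A true return means the skb was consumed (success, error, or queued on
 * another device); otherwise (NETDEV_TX_BUSY/NETDEV_TX_LOCKED) the caller
 * must keep the skb and retry later.
 *
 *	rc = ops->ndo_start_xmit(skb, dev);
 *	if (!dev_xmit_complete(rc))
 *		requeue_and_retry_later(skb);	(hypothetical helper)
 */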
  118. #endif
  119. #define MAX_ADDR_LEN 32 /* Largest hardware address length */
  120. /* Initial net device group. All devices belong to group 0 by default. */
  121. #define INIT_NETDEV_GROUP 0
  122. #ifdef __KERNEL__
  123. /*
  124. * Compute the worst case header length according to the protocols
  125. * used.
  126. */
  127. #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  128. # if defined(CONFIG_MAC80211_MESH)
  129. # define LL_MAX_HEADER 128
  130. # else
  131. # define LL_MAX_HEADER 96
  132. # endif
  133. #elif IS_ENABLED(CONFIG_TR)
  134. # define LL_MAX_HEADER 48
  135. #else
  136. # define LL_MAX_HEADER 32
  137. #endif
  138. #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
  139. !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  140. #define MAX_HEADER LL_MAX_HEADER
  141. #else
  142. #define MAX_HEADER (LL_MAX_HEADER + 48)
  143. #endif
  144. /*
  145. * Old network device statistics. Fields are native words
  146. * (unsigned long) so they can be read and written atomically.
  147. */
  148. struct net_device_stats {
  149. unsigned long rx_packets;
  150. unsigned long tx_packets;
  151. unsigned long rx_bytes;
  152. unsigned long tx_bytes;
  153. unsigned long rx_errors;
  154. unsigned long tx_errors;
  155. unsigned long rx_dropped;
  156. unsigned long tx_dropped;
  157. unsigned long multicast;
  158. unsigned long collisions;
  159. unsigned long rx_length_errors;
  160. unsigned long rx_over_errors;
  161. unsigned long rx_crc_errors;
  162. unsigned long rx_frame_errors;
  163. unsigned long rx_fifo_errors;
  164. unsigned long rx_missed_errors;
  165. unsigned long tx_aborted_errors;
  166. unsigned long tx_carrier_errors;
  167. unsigned long tx_fifo_errors;
  168. unsigned long tx_heartbeat_errors;
  169. unsigned long tx_window_errors;
  170. unsigned long rx_compressed;
  171. unsigned long tx_compressed;
  172. };
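/*
 * Usage sketch (not part of the original header): a driver that keeps these
 * counters itself simply bumps them from its completion paths, e.g. in its
 * receive handler:
 *
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += skb->len;
 */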
  173. #endif /* __KERNEL__ */
  174. /* Media selection options. */
  175. enum {
  176. IF_PORT_UNKNOWN = 0,
  177. IF_PORT_10BASE2,
  178. IF_PORT_10BASET,
  179. IF_PORT_AUI,
  180. IF_PORT_100BASET,
  181. IF_PORT_100BASETX,
  182. IF_PORT_100BASEFX
  183. };
  184. #ifdef __KERNEL__
  185. #include <linux/cache.h>
  186. #include <linux/skbuff.h>
  187. #ifdef CONFIG_RPS
  188. #include <linux/jump_label.h>
  189. extern struct jump_label_key rps_needed;
  190. #endif
  191. struct neighbour;
  192. struct neigh_parms;
  193. struct sk_buff;
  194. struct netdev_hw_addr {
  195. struct list_head list;
  196. unsigned char addr[MAX_ADDR_LEN];
  197. unsigned char type;
  198. #define NETDEV_HW_ADDR_T_LAN 1
  199. #define NETDEV_HW_ADDR_T_SAN 2
  200. #define NETDEV_HW_ADDR_T_SLAVE 3
  201. #define NETDEV_HW_ADDR_T_UNICAST 4
  202. #define NETDEV_HW_ADDR_T_MULTICAST 5
  203. bool synced;
  204. bool global_use;
  205. int refcount;
  206. struct rcu_head rcu_head;
  207. };
  208. struct netdev_hw_addr_list {
  209. struct list_head list;
  210. int count;
  211. };
  212. #define netdev_hw_addr_list_count(l) ((l)->count)
  213. #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  214. #define netdev_hw_addr_list_for_each(ha, l) \
  215. list_for_each_entry(ha, &(l)->list, list)
  216. #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  217. #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  218. #define netdev_for_each_uc_addr(ha, dev) \
  219. netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  220. #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  221. #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  222. #define netdev_for_each_mc_addr(ha, dev) \
  223. netdev_hw_addr_list_for_each(ha, &(dev)->mc)
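/*
 * A minimal sketch (not part of the original header) of how a driver's
 * ndo_set_rx_mode() might walk the multicast list with the helpers above;
 * example_hw_add_mc_filter() is a hypothetical hardware accessor.
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		example_hw_add_mc_filter(dev, ha->addr);
 */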
  224. struct hh_cache {
  225. u16 hh_len;
  226. u16 __pad;
  227. seqlock_t hh_lock;
  228. /* cached hardware header; allow for machine alignment needs. */
  229. #define HH_DATA_MOD 16
  230. #define HH_DATA_OFF(__len) \
  231. (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  232. #define HH_DATA_ALIGN(__len) \
  233. (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  234. unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  235. };
  236. /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  237. * Alternative is:
  238. * dev->hard_header_len ? (dev->hard_header_len +
  239. * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  240. *
  241. * We could use other alignment values, but we must maintain the
  242. * relationship HH alignment <= LL alignment.
  243. */
  244. #define LL_RESERVED_SPACE(dev) \
  245. ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  246. #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  247. ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
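/*
 * Worked example (illustrative, not from the original header): for a plain
 * Ethernet device with hard_header_len = 14 (ETH_HLEN) and needed_headroom = 0,
 * LL_RESERVED_SPACE(dev) = ((14 & ~15) + 16) = 16.  With needed_headroom = 2
 * it becomes ((16 & ~15) + 16) = 32.  Callers typically pass the result to
 * skb_reserve() before building the link layer header.
 */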
  248. struct header_ops {
  249. int (*create) (struct sk_buff *skb, struct net_device *dev,
  250. unsigned short type, const void *daddr,
  251. const void *saddr, unsigned len);
  252. int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
  253. int (*rebuild)(struct sk_buff *skb);
  254. int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  255. void (*cache_update)(struct hh_cache *hh,
  256. const struct net_device *dev,
  257. const unsigned char *haddr);
  258. };
  259. /* These flag bits are private to the generic network queueing
  260. * layer, they may not be explicitly referenced by any other
  261. * code.
  262. */
  263. enum netdev_state_t {
  264. __LINK_STATE_START,
  265. __LINK_STATE_PRESENT,
  266. __LINK_STATE_NOCARRIER,
  267. __LINK_STATE_LINKWATCH_PENDING,
  268. __LINK_STATE_DORMANT,
  269. };
  270. /*
  271. * This structure holds at boot time configured netdevice settings. They
  272. * are then used in the device probing.
  273. */
  274. struct netdev_boot_setup {
  275. char name[IFNAMSIZ];
  276. struct ifmap map;
  277. };
  278. #define NETDEV_BOOT_SETUP_MAX 8
  279. extern int __init netdev_boot_setup(char *str);
  280. /*
  281. * Structure for NAPI scheduling similar to tasklet but with weighting
  282. */
  283. struct napi_struct {
  284. /* The poll_list must only be managed by the entity which
  285. * changes the state of the NAPI_STATE_SCHED bit. This means
  286. * whoever atomically sets that bit can add this napi_struct
  287. * to the per-cpu poll_list, and whoever clears that bit
  288. * can remove from the list right before clearing the bit.
  289. */
  290. struct list_head poll_list;
  291. unsigned long state;
  292. int weight;
  293. int (*poll)(struct napi_struct *, int);
  294. #ifdef CONFIG_NETPOLL
  295. spinlock_t poll_lock;
  296. int poll_owner;
  297. #endif
  298. unsigned int gro_count;
  299. struct net_device *dev;
  300. struct list_head dev_list;
  301. struct sk_buff *gro_list;
  302. struct sk_buff *skb;
  303. };
  304. enum {
  305. NAPI_STATE_SCHED, /* Poll is scheduled */
  306. NAPI_STATE_DISABLE, /* Disable pending */
  307. NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
  308. };
  309. enum gro_result {
  310. GRO_MERGED,
  311. GRO_MERGED_FREE,
  312. GRO_HELD,
  313. GRO_NORMAL,
  314. GRO_DROP,
  315. };
  316. typedef enum gro_result gro_result_t;
  317. /*
  318. * enum rx_handler_result - Possible return values for rx_handlers.
  319. * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  320. * further.
  321. * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  322. * case skb->dev was changed by rx_handler.
  323. * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
  324. * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
  325. *
  326. * rx_handlers are functions called from inside __netif_receive_skb(), to do
  327. * special processing of the skb, prior to delivery to protocol handlers.
  328. *
  329. * Currently, a net_device can only have a single rx_handler registered. Trying
  330. * to register a second rx_handler will return -EBUSY.
  331. *
  332. * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  333. * To unregister a rx_handler on a net_device, use
  334. * netdev_rx_handler_unregister().
  335. *
  336. * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  337. * do with the skb.
  338. *
  339. * If the rx_handler consumed the skb in some way, it should return
  340. * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
  341. * the skb to be delivered in some other way.
  342. *
  343. * If the rx_handler changed skb->dev, to divert the skb to another
  344. * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  345. * new device will be called if it exists.
  346. *
  347. * If the rx_handler considers that the skb should be ignored, it should return
  348. * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
  349. * are registered on the exact device (ptype->dev == skb->dev).
  350. *
  351. * If the rx_handler didn't change skb->dev, but wants the skb to be delivered
  352. * normally, it should return RX_HANDLER_PASS.
  353. *
  354. * A device without a registered rx_handler will behave as if rx_handler
  355. * returned RX_HANDLER_PASS.
  356. */
  357. enum rx_handler_result {
  358. RX_HANDLER_CONSUMED,
  359. RX_HANDLER_ANOTHER,
  360. RX_HANDLER_EXACT,
  361. RX_HANDLER_PASS,
  362. };
  363. typedef enum rx_handler_result rx_handler_result_t;
  364. typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
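/*
 * A minimal rx_handler sketch (not part of the original header), as might be
 * registered with netdev_rx_handler_register(); example_upper_dev() is a
 * hypothetical lookup of the stacked device that owns this port.
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct net_device *upper = example_upper_dev((*pskb)->dev);
 *
 *		if (!upper)
 *			return RX_HANDLER_PASS;
 *		(*pskb)->dev = upper;
 *		return RX_HANDLER_ANOTHER;
 *	}
 */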
  365. extern void __napi_schedule(struct napi_struct *n);
  366. static inline int napi_disable_pending(struct napi_struct *n)
  367. {
  368. return test_bit(NAPI_STATE_DISABLE, &n->state);
  369. }
  370. /**
  371. * napi_schedule_prep - check if napi can be scheduled
  372. * @n: napi context
  373. *
  374. * Test if NAPI routine is already running, and if not mark
  375. * it as running. This is used as a condition variable to
  376. * ensure only one NAPI poll instance runs. We also make
  377. * sure there is no pending NAPI disable.
  378. */
  379. static inline int napi_schedule_prep(struct napi_struct *n)
  380. {
  381. return !napi_disable_pending(n) &&
  382. !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  383. }
  384. /**
  385. * napi_schedule - schedule NAPI poll
  386. * @n: napi context
  387. *
  388. * Schedule NAPI poll routine to be called if it is not already
  389. * running.
  390. */
  391. static inline void napi_schedule(struct napi_struct *n)
  392. {
  393. if (napi_schedule_prep(n))
  394. __napi_schedule(n);
  395. }
  396. /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
  397. static inline int napi_reschedule(struct napi_struct *napi)
  398. {
  399. if (napi_schedule_prep(napi)) {
  400. __napi_schedule(napi);
  401. return 1;
  402. }
  403. return 0;
  404. }
  405. /**
  406. * napi_complete - NAPI processing complete
  407. * @n: napi context
  408. *
  409. * Mark NAPI processing as complete.
  410. */
  411. extern void __napi_complete(struct napi_struct *n);
  412. extern void napi_complete(struct napi_struct *n);
  413. /**
  414. * napi_disable - prevent NAPI from scheduling
  415. * @n: napi context
  416. *
  417. * Stop NAPI from being scheduled on this context.
  418. * Waits till any outstanding processing completes.
  419. */
  420. static inline void napi_disable(struct napi_struct *n)
  421. {
  422. set_bit(NAPI_STATE_DISABLE, &n->state);
  423. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  424. msleep(1);
  425. clear_bit(NAPI_STATE_DISABLE, &n->state);
  426. }
  427. /**
  428. * napi_enable - enable NAPI scheduling
  429. * @n: napi context
  430. *
  431. * Resume NAPI from being scheduled on this context.
  432. * Must be paired with napi_disable.
  433. */
  434. static inline void napi_enable(struct napi_struct *n)
  435. {
  436. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  437. smp_mb__before_clear_bit();
  438. clear_bit(NAPI_STATE_SCHED, &n->state);
  439. }
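/*
 * A typical driver pattern, shown as a sketch (the example_* helpers and the
 * example_priv structure are hypothetical): the interrupt handler masks the
 * device's interrupts and schedules NAPI; the poll routine drains the ring
 * and, once it completes under budget, calls napi_complete() and re-enables
 * interrupts.
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		example_mask_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv = container_of(napi, struct example_priv, napi);
 *		int work = example_clean_rx_ring(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			example_unmask_irqs(priv);
 *		}
 *		return work;
 *	}
 */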
  440. #ifdef CONFIG_SMP
  441. /**
  442. * napi_synchronize - wait until NAPI is not running
  443. * @n: napi context
  444. *
  445. * Wait until NAPI is done being scheduled on this context.
  446. * Waits till any outstanding processing completes but
  447. * does not disable future activations.
  448. */
  449. static inline void napi_synchronize(const struct napi_struct *n)
  450. {
  451. while (test_bit(NAPI_STATE_SCHED, &n->state))
  452. msleep(1);
  453. }
  454. #else
  455. # define napi_synchronize(n) barrier()
  456. #endif
  457. enum netdev_queue_state_t {
  458. __QUEUE_STATE_DRV_XOFF,
  459. __QUEUE_STATE_STACK_XOFF,
  460. __QUEUE_STATE_FROZEN,
  461. #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
  462. (1 << __QUEUE_STATE_STACK_XOFF))
  463. #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
  464. (1 << __QUEUE_STATE_FROZEN))
  465. };
  466. /*
  467. * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
  468. * netif_tx_* functions below are used to manipulate this flag. The
  469. * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
  470. * queue independently. The netif_xmit_*stopped functions below are called
  471. * to check if the queue has been stopped by the driver or stack (either
  472. * of the XOFF bits are set in the state). Drivers should not need to call
  473. * netif_xmit*stopped functions, they should only be using netif_tx_*.
  474. */
  475. struct netdev_queue {
  476. /*
  477. * read mostly part
  478. */
  479. struct net_device *dev;
  480. struct Qdisc *qdisc;
  481. struct Qdisc *qdisc_sleeping;
  482. #ifdef CONFIG_SYSFS
  483. struct kobject kobj;
  484. #endif
  485. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  486. int numa_node;
  487. #endif
  488. /*
  489. * write mostly part
  490. */
  491. spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
  492. int xmit_lock_owner;
  493. /*
  494. * please use this field instead of dev->trans_start
  495. */
  496. unsigned long trans_start;
  497. /*
  498. * Number of TX timeouts for this queue
  499. * (/sys/class/net/DEV/Q/trans_timeout)
  500. */
  501. unsigned long trans_timeout;
  502. unsigned long state;
  503. #ifdef CONFIG_BQL
  504. struct dql dql;
  505. #endif
  506. } ____cacheline_aligned_in_smp;
  507. static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  508. {
  509. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  510. return q->numa_node;
  511. #else
  512. return NUMA_NO_NODE;
  513. #endif
  514. }
  515. static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  516. {
  517. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  518. q->numa_node = node;
  519. #endif
  520. }
  521. #ifdef CONFIG_RPS
  522. /*
  523. * This structure holds an RPS map which can be of variable length. The
  524. * map is an array of CPUs.
  525. */
  526. struct rps_map {
  527. unsigned int len;
  528. struct rcu_head rcu;
  529. u16 cpus[0];
  530. };
  531. #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
  532. /*
  533. * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  534. * tail pointer for that CPU's input queue at the time of last enqueue, and
  535. * a hardware filter index.
  536. */
  537. struct rps_dev_flow {
  538. u16 cpu;
  539. u16 filter;
  540. unsigned int last_qtail;
  541. };
  542. #define RPS_NO_FILTER 0xffff
  543. /*
  544. * The rps_dev_flow_table structure contains a table of flow mappings.
  545. */
  546. struct rps_dev_flow_table {
  547. unsigned int mask;
  548. struct rcu_head rcu;
  549. struct work_struct free_work;
  550. struct rps_dev_flow flows[0];
  551. };
  552. #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  553. ((_num) * sizeof(struct rps_dev_flow)))
  554. /*
  555. * The rps_sock_flow_table contains mappings of flows to the last CPU
  556. * on which they were processed by the application (set in recvmsg).
  557. */
  558. struct rps_sock_flow_table {
  559. unsigned int mask;
  560. u16 ents[0];
  561. };
  562. #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
  563. ((_num) * sizeof(u16)))
  564. #define RPS_NO_CPU 0xffff
  565. static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  566. u32 hash)
  567. {
  568. if (table && hash) {
  569. unsigned int cpu, index = hash & table->mask;
  570. /* We only give a hint, preemption can change cpu under us */
  571. cpu = raw_smp_processor_id();
  572. if (table->ents[index] != cpu)
  573. table->ents[index] = cpu;
  574. }
  575. }
  576. static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
  577. u32 hash)
  578. {
  579. if (table && hash)
  580. table->ents[hash & table->mask] = RPS_NO_CPU;
  581. }
  582. extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
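/*
 * Sketch of the intended use (roughly what the socket layer does at
 * recvmsg() time; only rps_record_sock_flow() and rps_sock_flow_table are
 * from this header, the rest is illustrative):
 *
 *	rcu_read_lock();
 *	rps_record_sock_flow(rcu_dereference(rps_sock_flow_table),
 *			     sk->sk_rxhash);
 *	rcu_read_unlock();
 */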
  583. #ifdef CONFIG_RFS_ACCEL
  584. extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  585. u32 flow_id, u16 filter_id);
  586. #endif
  587. /* This structure contains an instance of an RX queue. */
  588. struct netdev_rx_queue {
  589. struct rps_map __rcu *rps_map;
  590. struct rps_dev_flow_table __rcu *rps_flow_table;
  591. struct kobject kobj;
  592. struct net_device *dev;
  593. } ____cacheline_aligned_in_smp;
  594. #endif /* CONFIG_RPS */
  595. #ifdef CONFIG_XPS
  596. /*
  597. * This structure holds an XPS map which can be of variable length. The
  598. * map is an array of queues.
  599. */
  600. struct xps_map {
  601. unsigned int len;
  602. unsigned int alloc_len;
  603. struct rcu_head rcu;
  604. u16 queues[0];
  605. };
  606. #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
  607. #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
  608. / sizeof(u16))
  609. /*
  610. * This structure holds all XPS maps for device. Maps are indexed by CPU.
  611. */
  612. struct xps_dev_maps {
  613. struct rcu_head rcu;
  614. struct xps_map __rcu *cpu_map[0];
  615. };
  616. #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
  617. (nr_cpu_ids * sizeof(struct xps_map *)))
  618. #endif /* CONFIG_XPS */
  619. #define TC_MAX_QUEUE 16
  620. #define TC_BITMASK 15
  621. /* HW offloaded queuing disciplines txq count and offset maps */
  622. struct netdev_tc_txq {
  623. u16 count;
  624. u16 offset;
  625. };
  626. #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  627. /*
  628. * This structure is to hold information about the device
  629. * configured to run FCoE protocol stack.
  630. */
  631. struct netdev_fcoe_hbainfo {
  632. char manufacturer[64];
  633. char serial_number[64];
  634. char hardware_version[64];
  635. char driver_version[64];
  636. char optionrom_version[64];
  637. char firmware_version[64];
  638. char model[256];
  639. char model_description[256];
  640. };
  641. #endif
  642. /*
  643. * This structure defines the management hooks for network devices.
  644. * The following hooks can be defined; unless noted otherwise, they are
  645. * optional and can be filled with a null pointer.
  646. *
  647. * int (*ndo_init)(struct net_device *dev);
  648. * This function is called once when network device is registered.
  649. * The network device can use this for any late stage initialization
  650. * or semantic validation. It can fail with an error code which will
  651. * be propagated back to register_netdev.
  652. *
  653. * void (*ndo_uninit)(struct net_device *dev);
  654. * This function is called when device is unregistered or when registration
  655. * fails. It is not called if init fails.
  656. *
  657. * int (*ndo_open)(struct net_device *dev);
  658. * This function is called when the network device transitions to the up
  659. * state.
  660. *
  661. * int (*ndo_stop)(struct net_device *dev);
  662. * This function is called when the network device transitions to the down
  663. * state.
  664. *
  665. * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  666. * struct net_device *dev);
  667. * Called when a packet needs to be transmitted.
  668. * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
  669. * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  670. * Required; cannot be NULL.
  671. *
  672. * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
  673. * Called to decide which tx queue to use when the device supports
  674. * multiple transmit queues.
  675. *
  676. * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  677. * This function is called to allow the device receiver to make
  678. * changes to its configuration when multicast or promiscuous mode is enabled.
  679. *
  680. * void (*ndo_set_rx_mode)(struct net_device *dev);
  681. * This function is called when the device changes its address list filtering.
  682. * If driver handles unicast address filtering, it should set
  683. * IFF_UNICAST_FLT to its priv_flags.
  684. *
  685. * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  686. * This function is called when the Media Access Control address
  687. * needs to be changed. If this interface is not defined, the
  688. * MAC address cannot be changed.
  689. *
  690. * int (*ndo_validate_addr)(struct net_device *dev);
  691. * Test if Media Access Control address is valid for the device.
  692. *
  693. * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
  694. * Called when a user requests an ioctl which can't be handled by
  695. * the generic interface code. If not defined, ioctls return a
  696. * 'not supported' error code.
  697. *
  698. * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  699. * Used to set a network device's bus interface parameters. This interface
  700. * is retained for legacy reasons; new devices should use the bus
  701. * interface (PCI) for low level management.
  702. *
  703. * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  704. * Called when a user wants to change the Maximum Transfer Unit
  705. * of a device. If not defined, any request to change the MTU
  706. * will return an error.
  707. *
  708. * void (*ndo_tx_timeout)(struct net_device *dev);
  709. * Callback used when the transmitter has not made any progress
  710. * for dev->watchdog_timeo ticks.
  711. *
  712. * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  713. * struct rtnl_link_stats64 *storage);
  714. * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  715. * Called when a user wants to get the network device usage
  716. * statistics. Drivers must do one of the following:
  717. * 1. Define @ndo_get_stats64 to fill in a zero-initialised
  718. * rtnl_link_stats64 structure passed by the caller.
  719. * 2. Define @ndo_get_stats to update a net_device_stats structure
  720. * (which should normally be dev->stats) and return a pointer to
  721. * it. The structure may be changed asynchronously only if each
  722. * field is written atomically.
  723. * 3. Update dev->stats asynchronously and atomically, and define
  724. * neither operation.
  725. *
  726. * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  727. * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  728. * this function is called when a VLAN id is registered.
  729. *
  730. * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
  731. * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  732. * this function is called when a VLAN id is unregistered.
  733. *
  734. * void (*ndo_poll_controller)(struct net_device *dev);
  735. *
  736. * SR-IOV management functions.
  737. * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  738. * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
  739. * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
  740. * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  741. * int (*ndo_get_vf_config)(struct net_device *dev,
  742. * int vf, struct ifla_vf_info *ivf);
  743. * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  744. * struct nlattr *port[]);
  745. * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  746. * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
  747. * Called to setup 'tc' number of traffic classes in the net device. This
  748. * is always called from the stack with the rtnl lock held and netif tx
  749. * queues stopped. This allows the netdevice to perform queue management
  750. * safely.
  751. *
  752. * Fibre Channel over Ethernet (FCoE) offload functions.
  753. * int (*ndo_fcoe_enable)(struct net_device *dev);
  754. * Called when the FCoE protocol stack wants to start using LLD for FCoE
  755. * so the underlying device can perform whatever needed configuration or
  756. * initialization to support acceleration of FCoE traffic.
  757. *
  758. * int (*ndo_fcoe_disable)(struct net_device *dev);
  759. * Called when the FCoE protocol stack wants to stop using LLD for FCoE
  760. * so the underlying device can perform whatever needed clean-ups to
  761. * stop supporting acceleration of FCoE traffic.
  762. *
  763. * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  764. * struct scatterlist *sgl, unsigned int sgc);
  765. * Called when the FCoE Initiator wants to initialize an I/O that
  766. * is a possible candidate for Direct Data Placement (DDP). The LLD can
  767. * perform necessary setup and returns 1 to indicate the device is set up
  768. * successfully to perform DDP on this I/O, otherwise this returns 0.
  769. *
  770. * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
  771. * Called when the FCoE Initiator/Target is done with the DDPed I/O as
  772. * indicated by the FC exchange id 'xid', so the underlying device can
  773. * clean up and reuse resources for later DDP requests.
  774. *
  775. * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  776. * struct scatterlist *sgl, unsigned int sgc);
  777. * Called when the FCoE Target wants to initialize an I/O that
  778. * is a possible candidate for Direct Data Placement (DDP). The LLD can
  779. * perform necessary setup and returns 1 to indicate the device is set up
  780. * successfully to perform DDP on this I/O, otherwise this returns 0.
  781. *
  782. * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
  783. * struct netdev_fcoe_hbainfo *hbainfo);
  784. * Called when the FCoE Protocol stack wants information on the underlying
  785. * device. This information is utilized by the FCoE protocol stack to
  786. * register attributes with the Fibre Channel management service as per the
  787. * FC-GS Fabric Device Management Information (FDMI) specification.
  788. *
  789. * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
  790. * Called when the underlying device wants to override default World Wide
  791. * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
  792. * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
  793. * protocol stack to use.
  794. *
  795. * RFS acceleration.
  796. * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
  797. * u16 rxq_index, u32 flow_id);
  798. * Set hardware filter for RFS. rxq_index is the target queue index;
  799. * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
  800. * Return the filter ID on success, or a negative error code.
  801. *
  802. * Slave management functions (for bridge, bonding, etc). User should
  803. * call netdev_set_master() to set dev->master properly.
  804. * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
  805. * Called to make another netdev an underling.
  806. *
  807. * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
  808. * Called to release previously enslaved netdev.
  809. *
  810. * Feature/offload setting functions.
  811. * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  812. * netdev_features_t features);
  813. * Adjusts the requested feature flags according to device-specific
  814. * constraints, and returns the resulting flags. Must not modify
  815. * the device state.
  816. *
  817. * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  818. * Called to update device configuration to new features. Passed
  819. * feature set might be less than what was returned by ndo_fix_features().
  820. * Must return >0 or -errno if it changed dev->features itself.
  821. *
  822. */
  823. struct net_device_ops {
  824. int (*ndo_init)(struct net_device *dev);
  825. void (*ndo_uninit)(struct net_device *dev);
  826. int (*ndo_open)(struct net_device *dev);
  827. int (*ndo_stop)(struct net_device *dev);
  828. netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
  829. struct net_device *dev);
  830. u16 (*ndo_select_queue)(struct net_device *dev,
  831. struct sk_buff *skb);
  832. void (*ndo_change_rx_flags)(struct net_device *dev,
  833. int flags);
  834. void (*ndo_set_rx_mode)(struct net_device *dev);
  835. int (*ndo_set_mac_address)(struct net_device *dev,
  836. void *addr);
  837. int (*ndo_validate_addr)(struct net_device *dev);
  838. int (*ndo_do_ioctl)(struct net_device *dev,
  839. struct ifreq *ifr, int cmd);
  840. int (*ndo_set_config)(struct net_device *dev,
  841. struct ifmap *map);
  842. int (*ndo_change_mtu)(struct net_device *dev,
  843. int new_mtu);
  844. int (*ndo_neigh_setup)(struct net_device *dev,
  845. struct neigh_parms *);
  846. void (*ndo_tx_timeout) (struct net_device *dev);
  847. struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  848. struct rtnl_link_stats64 *storage);
  849. struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  850. int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
  851. unsigned short vid);
  852. int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
  853. unsigned short vid);
  854. #ifdef CONFIG_NET_POLL_CONTROLLER
  855. void (*ndo_poll_controller)(struct net_device *dev);
  856. int (*ndo_netpoll_setup)(struct net_device *dev,
  857. struct netpoll_info *info);
  858. void (*ndo_netpoll_cleanup)(struct net_device *dev);
  859. #endif
  860. int (*ndo_set_vf_mac)(struct net_device *dev,
  861. int queue, u8 *mac);
  862. int (*ndo_set_vf_vlan)(struct net_device *dev,
  863. int queue, u16 vlan, u8 qos);
  864. int (*ndo_set_vf_tx_rate)(struct net_device *dev,
  865. int vf, int rate);
  866. int (*ndo_set_vf_spoofchk)(struct net_device *dev,
  867. int vf, bool setting);
  868. int (*ndo_get_vf_config)(struct net_device *dev,
  869. int vf,
  870. struct ifla_vf_info *ivf);
  871. int (*ndo_set_vf_port)(struct net_device *dev,
  872. int vf,
  873. struct nlattr *port[]);
  874. int (*ndo_get_vf_port)(struct net_device *dev,
  875. int vf, struct sk_buff *skb);
  876. int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
  877. #if IS_ENABLED(CONFIG_FCOE)
  878. int (*ndo_fcoe_enable)(struct net_device *dev);
  879. int (*ndo_fcoe_disable)(struct net_device *dev);
  880. int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
  881. u16 xid,
  882. struct scatterlist *sgl,
  883. unsigned int sgc);
  884. int (*ndo_fcoe_ddp_done)(struct net_device *dev,
  885. u16 xid);
  886. int (*ndo_fcoe_ddp_target)(struct net_device *dev,
  887. u16 xid,
  888. struct scatterlist *sgl,
  889. unsigned int sgc);
  890. int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
  891. struct netdev_fcoe_hbainfo *hbainfo);
  892. #endif
  893. #if IS_ENABLED(CONFIG_LIBFCOE)
  894. #define NETDEV_FCOE_WWNN 0
  895. #define NETDEV_FCOE_WWPN 1
  896. int (*ndo_fcoe_get_wwn)(struct net_device *dev,
  897. u64 *wwn, int type);
  898. #endif
  899. #ifdef CONFIG_RFS_ACCEL
  900. int (*ndo_rx_flow_steer)(struct net_device *dev,
  901. const struct sk_buff *skb,
  902. u16 rxq_index,
  903. u32 flow_id);
  904. #endif
  905. int (*ndo_add_slave)(struct net_device *dev,
  906. struct net_device *slave_dev);
  907. int (*ndo_del_slave)(struct net_device *dev,
  908. struct net_device *slave_dev);
  909. netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  910. netdev_features_t features);
  911. int (*ndo_set_features)(struct net_device *dev,
  912. netdev_features_t features);
  913. int (*ndo_neigh_construct)(struct neighbour *n);
  914. void (*ndo_neigh_destroy)(struct neighbour *n);
  915. };
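/*
 * A minimal Ethernet driver typically fills in only a handful of these
 * hooks; a sketch (the example_* functions are hypothetical, the eth_*
 * helpers are the generic ones from <linux/etherdevice.h>):
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_open		= example_open,
 *		.ndo_stop		= example_stop,
 *		.ndo_start_xmit		= example_start_xmit,
 *		.ndo_set_rx_mode	= example_set_rx_mode,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_change_mtu		= eth_change_mtu,
 *	};
 *
 * The table is assigned to dev->netdev_ops before register_netdev().
 */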
  916. /*
  917. * The DEVICE structure.
  918. * Actually, this whole structure is a big mistake. It mixes I/O
  919. * data with strictly "high-level" data, and it has to know about
  920. * almost every data structure used in the INET module.
  921. *
  922. * FIXME: cleanup struct net_device such that network protocol info
  923. * moves out.
  924. */
  925. struct net_device {
  926. /*
  927. * This is the first field of the "visible" part of this structure
  928. * (i.e. as seen by users in the "Space.c" file). It is the name
  929. * of the interface.
  930. */
  931. char name[IFNAMSIZ];
  932. struct pm_qos_request pm_qos_req;
  933. /* device name hash chain */
  934. struct hlist_node name_hlist;
  935. /* snmp alias */
  936. char *ifalias;
  937. /*
  938. * I/O specific fields
  939. * FIXME: Merge these and struct ifmap into one
  940. */
  941. unsigned long mem_end; /* shared mem end */
  942. unsigned long mem_start; /* shared mem start */
  943. unsigned long base_addr; /* device I/O address */
  944. unsigned int irq; /* device IRQ number */
  945. /*
  946. * Some hardware also needs these fields, but they are not
  947. * part of the usual set specified in Space.c.
  948. */
  949. unsigned long state;
  950. struct list_head dev_list;
  951. struct list_head napi_list;
  952. struct list_head unreg_list;
  953. /* currently active device features */
  954. netdev_features_t features;
  955. /* user-changeable features */
  956. netdev_features_t hw_features;
  957. /* user-requested features */
  958. netdev_features_t wanted_features;
  959. /* mask of features inheritable by VLAN devices */
  960. netdev_features_t vlan_features;
  961. /* Interface index. Unique device identifier */
  962. int ifindex;
  963. int iflink;
  964. struct net_device_stats stats;
  965. atomic_long_t rx_dropped; /* dropped packets by core network
  966. * Do not use this in drivers.
  967. */
  968. #ifdef CONFIG_WIRELESS_EXT
  969. /* List of functions to handle Wireless Extensions (instead of ioctl).
  970. * See <net/iw_handler.h> for details. Jean II */
  971. const struct iw_handler_def * wireless_handlers;
  972. /* Instance data managed by the core of Wireless Extensions. */
  973. struct iw_public_data * wireless_data;
  974. #endif
  975. /* Management operations */
  976. const struct net_device_ops *netdev_ops;
  977. const struct ethtool_ops *ethtool_ops;
  978. /* Hardware header description */
  979. const struct header_ops *header_ops;
  980. unsigned int flags; /* interface flags (a la BSD) */
  981. unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
  982. * See if.h for definitions. */
  983. unsigned short gflags;
  984. unsigned short padded; /* How much padding added by alloc_netdev() */
  985. unsigned char operstate; /* RFC2863 operstate */
  986. unsigned char link_mode; /* mapping policy to operstate */
  987. unsigned char if_port; /* Selectable AUI, TP,..*/
  988. unsigned char dma; /* DMA channel */
  989. unsigned int mtu; /* interface MTU value */
  990. unsigned short type; /* interface hardware type */
  991. unsigned short hard_header_len; /* hardware hdr length */
  992. /* extra head- and tailroom the hardware may need, but not in all cases
  993. * can this be guaranteed, especially tailroom. Some cases also use
  994. * LL_MAX_HEADER instead to allocate the skb.
  995. */
  996. unsigned short needed_headroom;
  997. unsigned short needed_tailroom;
  998. /* Interface address info. */
  999. unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
  1000. unsigned char addr_assign_type; /* hw address assignment type */
  1001. unsigned char addr_len; /* hardware address length */
  1002. unsigned char neigh_priv_len;
  1003. unsigned short dev_id; /* for shared network cards */
  1004. spinlock_t addr_list_lock;
  1005. struct netdev_hw_addr_list uc; /* Unicast mac addresses */
  1006. struct netdev_hw_addr_list mc; /* Multicast mac addresses */
  1007. bool uc_promisc;
  1008. unsigned int promiscuity;
  1009. unsigned int allmulti;
  1010. /* Protocol specific pointers */
  1011. #if IS_ENABLED(CONFIG_VLAN_8021Q)
  1012. struct vlan_info __rcu *vlan_info; /* VLAN info */
  1013. #endif
  1014. #if IS_ENABLED(CONFIG_NET_DSA)
  1015. struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
  1016. #endif
  1017. void *atalk_ptr; /* AppleTalk link */
  1018. struct in_device __rcu *ip_ptr; /* IPv4 specific data */
  1019. struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
  1020. struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
  1021. void *ec_ptr; /* Econet specific data */
  1022. void *ax25_ptr; /* AX.25 specific data */
  1023. struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
  1024. assign before registering */
  1025. /*
  1026. * Cache lines mostly used on receive path (including eth_type_trans())
  1027. */
  1028. unsigned long last_rx; /* Time of last Rx
  1029. * This should not be set in
  1030. * drivers, unless really needed,
  1031. * because the network stack (bonding)
  1032. * uses it if/when necessary, to
  1033. * avoid dirtying this cache line.
  1034. */
  1035. struct net_device *master; /* Pointer to master device of a group,
  1036. * which this device is member of.
  1037. */
  1038. /* Interface address info used in eth_type_trans() */
  1039. unsigned char *dev_addr; /* hw address, (before bcast
  1040. because most packets are
  1041. unicast) */
  1042. struct netdev_hw_addr_list dev_addrs; /* list of device
  1043. hw addresses */
  1044. unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
  1045. #ifdef CONFIG_SYSFS
  1046. struct kset *queues_kset;
  1047. #endif
  1048. #ifdef CONFIG_RPS
  1049. struct netdev_rx_queue *_rx;
  1050. /* Number of RX queues allocated at register_netdev() time */
  1051. unsigned int num_rx_queues;
  1052. /* Number of RX queues currently active in device */
  1053. unsigned int real_num_rx_queues;
  1054. #ifdef CONFIG_RFS_ACCEL
  1055. /* CPU reverse-mapping for RX completion interrupts, indexed
  1056. * by RX queue number. Assigned by driver. This must only be
  1057. * set if the ndo_rx_flow_steer operation is defined. */
  1058. struct cpu_rmap *rx_cpu_rmap;
  1059. #endif
  1060. #endif
  1061. rx_handler_func_t __rcu *rx_handler;
  1062. void __rcu *rx_handler_data;
  1063. struct netdev_queue __rcu *ingress_queue;
  1064. /*
  1065. * Cache lines mostly used on transmit path
  1066. */
  1067. struct netdev_queue *_tx ____cacheline_aligned_in_smp;
  1068. /* Number of TX queues allocated at alloc_netdev_mq() time */
  1069. unsigned int num_tx_queues;
  1070. /* Number of TX queues currently active in device */
  1071. unsigned int real_num_tx_queues;
  1072. /* root qdisc from userspace point of view */
  1073. struct Qdisc *qdisc;
  1074. unsigned long tx_queue_len; /* Max frames per queue allowed */
  1075. spinlock_t tx_global_lock;
  1076. #ifdef CONFIG_XPS
  1077. struct xps_dev_maps __rcu *xps_maps;
  1078. #endif
  1079. /* These may be needed for future network-power-down code. */
  1080. /*
  1081. * trans_start here is expensive for high speed devices on SMP,
  1082. * please use netdev_queue->trans_start instead.
  1083. */
  1084. unsigned long trans_start; /* Time (in jiffies) of last Tx */
  1085. int watchdog_timeo; /* used by dev_watchdog() */
  1086. struct timer_list watchdog_timer;
  1087. /* Number of references to this device */
  1088. int __percpu *pcpu_refcnt;
  1089. /* delayed register/unregister */
  1090. struct list_head todo_list;
  1091. /* device index hash chain */
  1092. struct hlist_node index_hlist;
  1093. struct list_head link_watch_list;
  1094. /* register/unregister state machine */
  1095. enum { NETREG_UNINITIALIZED=0,
  1096. NETREG_REGISTERED, /* completed register_netdevice */
  1097. NETREG_UNREGISTERING, /* called unregister_netdevice */
  1098. NETREG_UNREGISTERED, /* completed unregister todo */
  1099. NETREG_RELEASED, /* called free_netdev */
  1100. NETREG_DUMMY, /* dummy device for NAPI poll */
  1101. } reg_state:8;
  1102. bool dismantle; /* device is going to be freed */
  1103. enum {
  1104. RTNL_LINK_INITIALIZED,
  1105. RTNL_LINK_INITIALIZING,
  1106. } rtnl_link_state:16;
  1107. /* Called from unregister, can be used to call free_netdev */
  1108. void (*destructor)(struct net_device *dev);
  1109. #ifdef CONFIG_NETPOLL
  1110. struct netpoll_info *npinfo;
  1111. #endif
  1112. #ifdef CONFIG_NET_NS
  1113. /* Network namespace this network device is inside */
  1114. struct net *nd_net;
  1115. #endif
  1116. /* mid-layer private */
  1117. union {
  1118. void *ml_priv;
  1119. struct pcpu_lstats __percpu *lstats; /* loopback stats */
  1120. struct pcpu_tstats __percpu *tstats; /* tunnel stats */
  1121. struct pcpu_dstats __percpu *dstats; /* dummy stats */
  1122. };
  1123. /* GARP */
  1124. struct garp_port __rcu *garp_port;
  1125. /* class/net/name entry */
  1126. struct device dev;
  1127. /* space for optional device, statistics, and wireless sysfs groups */
  1128. const struct attribute_group *sysfs_groups[4];
  1129. /* rtnetlink link ops */
  1130. const struct rtnl_link_ops *rtnl_link_ops;
  1131. /* for setting kernel sock attribute on TCP connection setup */
  1132. #define GSO_MAX_SIZE 65536
  1133. unsigned int gso_max_size;
  1134. #ifdef CONFIG_DCB
  1135. /* Data Center Bridging netlink ops */
  1136. const struct dcbnl_rtnl_ops *dcbnl_ops;
  1137. #endif
  1138. u8 num_tc;
  1139. struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
  1140. u8 prio_tc_map[TC_BITMASK + 1];
  1141. #if IS_ENABLED(CONFIG_FCOE)
  1142. /* max exchange id for FCoE LRO by ddp */
  1143. unsigned int fcoe_ddp_xid;
  1144. #endif
  1145. #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
  1146. struct netprio_map __rcu *priomap;
  1147. #endif
  1148. /* phy device may attach itself for hardware timestamping */
  1149. struct phy_device *phydev;
  1150. /* group the device belongs to */
  1151. int group;
  1152. };
  1153. #define to_net_dev(d) container_of(d, struct net_device, dev)
  1154. #define NETDEV_ALIGN 32
  1155. static inline
  1156. int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
  1157. {
  1158. return dev->prio_tc_map[prio & TC_BITMASK];
  1159. }
  1160. static inline
  1161. int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
  1162. {
  1163. if (tc >= dev->num_tc)
  1164. return -EINVAL;
  1165. dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
  1166. return 0;
  1167. }
  1168. static inline
  1169. void netdev_reset_tc(struct net_device *dev)
  1170. {
  1171. dev->num_tc = 0;
  1172. memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
  1173. memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
  1174. }
  1175. static inline
  1176. int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
  1177. {
  1178. if (tc >= dev->num_tc)
  1179. return -EINVAL;
  1180. dev->tc_to_txq[tc].count = count;
  1181. dev->tc_to_txq[tc].offset = offset;
  1182. return 0;
  1183. }
  1184. static inline
  1185. int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
  1186. {
  1187. if (num_tc > TC_MAX_QUEUE)
  1188. return -EINVAL;
  1189. dev->num_tc = num_tc;
  1190. return 0;
  1191. }
  1192. static inline
  1193. int netdev_get_num_tc(struct net_device *dev)
  1194. {
  1195. return dev->num_tc;
  1196. }
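/*
 * Example (illustrative sketch, not part of the original header): how a
 * multiqueue driver might use the helpers above to configure traffic
 * classes.  foo_setup_tc() is a hypothetical name; drivers typically wire
 * this up through their ndo_setup_tc() callback.
 *
 *	static int foo_setup_tc(struct net_device *dev, u8 num_tc)
 *	{
 *		u16 queues_per_tc;
 *		u8 tc, prio;
 *
 *		if (!num_tc) {
 *			netdev_reset_tc(dev);
 *			return 0;
 *		}
 *
 *		if (netdev_set_num_tc(dev, num_tc))
 *			return -EINVAL;
 *
 *		queues_per_tc = dev->real_num_tx_queues / num_tc;
 *		for (tc = 0; tc < num_tc; tc++)
 *			netdev_set_tc_queue(dev, tc, queues_per_tc,
 *					    tc * queues_per_tc);
 *
 *		for (prio = 0; prio <= TC_BITMASK; prio++)
 *			netdev_set_prio_tc_map(dev, prio, prio % num_tc);
 *
 *		return 0;
 *	}
 */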
  1197. static inline
  1198. struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
  1199. unsigned int index)
  1200. {
  1201. return &dev->_tx[index];
  1202. }
  1203. static inline void netdev_for_each_tx_queue(struct net_device *dev,
  1204. void (*f)(struct net_device *,
  1205. struct netdev_queue *,
  1206. void *),
  1207. void *arg)
  1208. {
  1209. unsigned int i;
  1210. for (i = 0; i < dev->num_tx_queues; i++)
  1211. f(dev, &dev->_tx[i], arg);
  1212. }
  1213. /*
  1214. * Net namespace inlines
  1215. */
  1216. static inline
  1217. struct net *dev_net(const struct net_device *dev)
  1218. {
  1219. return read_pnet(&dev->nd_net);
  1220. }
  1221. static inline
  1222. void dev_net_set(struct net_device *dev, struct net *net)
  1223. {
  1224. #ifdef CONFIG_NET_NS
  1225. release_net(dev->nd_net);
  1226. dev->nd_net = hold_net(net);
  1227. #endif
  1228. }
  1229. static inline bool netdev_uses_dsa_tags(struct net_device *dev)
  1230. {
  1231. #ifdef CONFIG_NET_DSA_TAG_DSA
  1232. if (dev->dsa_ptr != NULL)
  1233. return dsa_uses_dsa_tags(dev->dsa_ptr);
  1234. #endif
  1235. return 0;
  1236. }
  1237. #ifndef CONFIG_NET_NS
  1238. static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
  1239. {
  1240. skb->dev = dev;
  1241. }
  1242. #else /* CONFIG_NET_NS */
  1243. void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
  1244. #endif
  1245. static inline bool netdev_uses_trailer_tags(struct net_device *dev)
  1246. {
  1247. #ifdef CONFIG_NET_DSA_TAG_TRAILER
  1248. if (dev->dsa_ptr != NULL)
  1249. return dsa_uses_trailer_tags(dev->dsa_ptr);
  1250. #endif
  1251. return 0;
  1252. }
  1253. /**
  1254. * netdev_priv - access network device private data
  1255. * @dev: network device
  1256. *
  1257. * Get network device private data
  1258. */
  1259. static inline void *netdev_priv(const struct net_device *dev)
  1260. {
  1261. return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
  1262. }
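/*
 * Example (illustrative sketch, not part of the original header): accessing
 * driver-private state.  The private area must have been reserved when the
 * device was allocated, e.g. alloc_netdev(sizeof(struct foo_priv), ...).
 * struct foo_priv and foo_count_tx_error() are hypothetical names.
 *
 *	struct foo_priv {
 *		spinlock_t	lock;
 *		unsigned long	tx_errors;
 *	};
 *
 *	static void foo_count_tx_error(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		spin_lock(&priv->lock);
 *		priv->tx_errors++;
 *		spin_unlock(&priv->lock);
 *	}
 */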
1263. /* Set the sysfs physical device reference for the network logical device.
1264. * If set prior to registration, a symlink is created during initialization.
  1265. */
  1266. #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
  1267. /* Set the sysfs device type for the network logical device to allow
1268. * fine-grained identification of different network device types. For
1269. * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
  1270. */
  1271. #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
  1272. /**
  1273. * netif_napi_add - initialize a napi context
  1274. * @dev: network device
  1275. * @napi: napi context
  1276. * @poll: polling function
  1277. * @weight: default weight
  1278. *
  1279. * netif_napi_add() must be used to initialize a napi context prior to calling
  1280. * *any* of the other napi related functions.
  1281. */
  1282. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  1283. int (*poll)(struct napi_struct *, int), int weight);
  1284. /**
  1285. * netif_napi_del - remove a napi context
  1286. * @napi: napi context
  1287. *
  1288. * netif_napi_del() removes a napi context from the network device napi list
  1289. */
  1290. void netif_napi_del(struct napi_struct *napi);
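/*
 * Example (illustrative sketch, not part of the original header): a typical
 * NAPI poll routine and its registration.  struct foo_priv (assumed to embed
 * a struct napi_struct member named 'napi'), foo_clean_rx_ring() and
 * foo_enable_rx_irq() are hypothetical driver helpers; 64 is the
 * conventional default weight.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done;
 *
 *		work_done = foo_clean_rx_ring(priv, budget);
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work_done;
 *	}
 *
 *	In the probe path, before register_netdev():
 *		netif_napi_add(dev, &priv->napi, foo_poll, 64);
 */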
  1291. struct napi_gro_cb {
  1292. /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
  1293. void *frag0;
  1294. /* Length of frag0. */
  1295. unsigned int frag0_len;
  1296. /* This indicates where we are processing relative to skb->data. */
  1297. int data_offset;
  1298. /* This is non-zero if the packet may be of the same flow. */
  1299. int same_flow;
  1300. /* This is non-zero if the packet cannot be merged with the new skb. */
  1301. int flush;
  1302. /* Number of segments aggregated. */
  1303. int count;
  1304. /* Free the skb? */
  1305. int free;
  1306. };
  1307. #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
  1308. struct packet_type {
  1309. __be16 type; /* This is really htons(ether_type). */
  1310. struct net_device *dev; /* NULL is wildcarded here */
  1311. int (*func) (struct sk_buff *,
  1312. struct net_device *,
  1313. struct packet_type *,
  1314. struct net_device *);
  1315. struct sk_buff *(*gso_segment)(struct sk_buff *skb,
  1316. netdev_features_t features);
  1317. int (*gso_send_check)(struct sk_buff *skb);
  1318. struct sk_buff **(*gro_receive)(struct sk_buff **head,
  1319. struct sk_buff *skb);
  1320. int (*gro_complete)(struct sk_buff *skb);
  1321. void *af_packet_priv;
  1322. struct list_head list;
  1323. };
  1324. #include <linux/notifier.h>
  1325. /* netdevice notifier chain. Please remember to update the rtnetlink
  1326. * notification exclusion list in rtnetlink_event() when adding new
  1327. * types.
  1328. */
  1329. #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
  1330. #define NETDEV_DOWN 0x0002
  1331. #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
  1332. detected a hardware crash and restarted
  1333. - we can use this eg to kick tcp sessions
  1334. once done */
  1335. #define NETDEV_CHANGE 0x0004 /* Notify device state change */
  1336. #define NETDEV_REGISTER 0x0005
  1337. #define NETDEV_UNREGISTER 0x0006
  1338. #define NETDEV_CHANGEMTU 0x0007
  1339. #define NETDEV_CHANGEADDR 0x0008
  1340. #define NETDEV_GOING_DOWN 0x0009
  1341. #define NETDEV_CHANGENAME 0x000A
  1342. #define NETDEV_FEAT_CHANGE 0x000B
  1343. #define NETDEV_BONDING_FAILOVER 0x000C
  1344. #define NETDEV_PRE_UP 0x000D
  1345. #define NETDEV_PRE_TYPE_CHANGE 0x000E
  1346. #define NETDEV_POST_TYPE_CHANGE 0x000F
  1347. #define NETDEV_POST_INIT 0x0010
  1348. #define NETDEV_UNREGISTER_BATCH 0x0011
  1349. #define NETDEV_RELEASE 0x0012
  1350. #define NETDEV_NOTIFY_PEERS 0x0013
  1351. #define NETDEV_JOIN 0x0014
  1352. extern int register_netdevice_notifier(struct notifier_block *nb);
  1353. extern int unregister_netdevice_notifier(struct notifier_block *nb);
  1354. extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
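/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * netdevice notifier.  In this kernel the callback's ptr argument is the
 * struct net_device itself.  foo_netdev_event() and foo_netdev_notifier are
 * hypothetical names.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			pr_info("%s is going down\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	The block is registered from module init with
 *	register_netdevice_notifier(&foo_netdev_notifier) and removed with
 *	unregister_netdevice_notifier(&foo_netdev_notifier).
 */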
  1355. extern rwlock_t dev_base_lock; /* Device list lock */
  1356. #define for_each_netdev(net, d) \
  1357. list_for_each_entry(d, &(net)->dev_base_head, dev_list)
  1358. #define for_each_netdev_reverse(net, d) \
  1359. list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
  1360. #define for_each_netdev_rcu(net, d) \
  1361. list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
  1362. #define for_each_netdev_safe(net, d, n) \
  1363. list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
  1364. #define for_each_netdev_continue(net, d) \
  1365. list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
  1366. #define for_each_netdev_continue_rcu(net, d) \
  1367. list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
  1368. #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
  1369. static inline struct net_device *next_net_device(struct net_device *dev)
  1370. {
  1371. struct list_head *lh;
  1372. struct net *net;
  1373. net = dev_net(dev);
  1374. lh = dev->dev_list.next;
  1375. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1376. }
  1377. static inline struct net_device *next_net_device_rcu(struct net_device *dev)
  1378. {
  1379. struct list_head *lh;
  1380. struct net *net;
  1381. net = dev_net(dev);
  1382. lh = rcu_dereference(list_next_rcu(&dev->dev_list));
  1383. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1384. }
  1385. static inline struct net_device *first_net_device(struct net *net)
  1386. {
  1387. return list_empty(&net->dev_base_head) ? NULL :
  1388. net_device_entry(net->dev_base_head.next);
  1389. }
  1390. static inline struct net_device *first_net_device_rcu(struct net *net)
  1391. {
  1392. struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
  1393. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1394. }
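/*
 * Example (illustrative sketch, not part of the original header): walking
 * the device list of a namespace under RCU.  foo_dump_devices() is a
 * hypothetical name.
 *
 *	static void foo_dump_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
 *		rcu_read_unlock();
 *	}
 *
 *	The non-RCU variants (for_each_netdev() etc.) must instead be called
 *	with the RTNL or dev_base_lock held.
 */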
  1395. extern int netdev_boot_setup_check(struct net_device *dev);
  1396. extern unsigned long netdev_boot_base(const char *prefix, int unit);
  1397. extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
  1398. const char *hwaddr);
  1399. extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
  1400. extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
  1401. extern void dev_add_pack(struct packet_type *pt);
  1402. extern void dev_remove_pack(struct packet_type *pt);
  1403. extern void __dev_remove_pack(struct packet_type *pt);
  1404. extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
  1405. unsigned short mask);
  1406. extern struct net_device *dev_get_by_name(struct net *net, const char *name);
  1407. extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
  1408. extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
  1409. extern int dev_alloc_name(struct net_device *dev, const char *name);
  1410. extern int dev_open(struct net_device *dev);
  1411. extern int dev_close(struct net_device *dev);
  1412. extern void dev_disable_lro(struct net_device *dev);
  1413. extern int dev_queue_xmit(struct sk_buff *skb);
  1414. extern int register_netdevice(struct net_device *dev);
  1415. extern void unregister_netdevice_queue(struct net_device *dev,
  1416. struct list_head *head);
  1417. extern void unregister_netdevice_many(struct list_head *head);
  1418. static inline void unregister_netdevice(struct net_device *dev)
  1419. {
  1420. unregister_netdevice_queue(dev, NULL);
  1421. }
  1422. extern int netdev_refcnt_read(const struct net_device *dev);
  1423. extern void free_netdev(struct net_device *dev);
  1424. extern void synchronize_net(void);
  1425. extern int init_dummy_netdev(struct net_device *dev);
  1426. extern void netdev_resync_ops(struct net_device *dev);
  1427. extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
  1428. extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  1429. extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
  1430. extern int dev_restart(struct net_device *dev);
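/*
 * Example (illustrative sketch, not part of the original header):
 * dev_get_by_name() returns the device with a reference held, which the
 * caller must drop with dev_put().  foo_get_mtu() is a hypothetical name.
 *
 *	static int foo_get_mtu(struct net *net, const char *name)
 *	{
 *		struct net_device *dev;
 *		int mtu;
 *
 *		dev = dev_get_by_name(net, name);
 *		if (!dev)
 *			return -ENODEV;
 *		mtu = dev->mtu;
 *		dev_put(dev);
 *		return mtu;
 *	}
 *
 *	The _rcu variants may be used instead inside an rcu_read_lock()
 *	section, without taking a reference.
 */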
  1431. #ifdef CONFIG_NETPOLL_TRAP
  1432. extern int netpoll_trap(void);
  1433. #endif
  1434. extern int skb_gro_receive(struct sk_buff **head,
  1435. struct sk_buff *skb);
  1436. extern void skb_gro_reset_offset(struct sk_buff *skb);
  1437. static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
  1438. {
  1439. return NAPI_GRO_CB(skb)->data_offset;
  1440. }
  1441. static inline unsigned int skb_gro_len(const struct sk_buff *skb)
  1442. {
  1443. return skb->len - NAPI_GRO_CB(skb)->data_offset;
  1444. }
  1445. static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
  1446. {
  1447. NAPI_GRO_CB(skb)->data_offset += len;
  1448. }
  1449. static inline void *skb_gro_header_fast(struct sk_buff *skb,
  1450. unsigned int offset)
  1451. {
  1452. return NAPI_GRO_CB(skb)->frag0 + offset;
  1453. }
  1454. static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
  1455. {
  1456. return NAPI_GRO_CB(skb)->frag0_len < hlen;
  1457. }
  1458. static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
  1459. unsigned int offset)
  1460. {
  1461. if (!pskb_may_pull(skb, hlen))
  1462. return NULL;
  1463. NAPI_GRO_CB(skb)->frag0 = NULL;
  1464. NAPI_GRO_CB(skb)->frag0_len = 0;
  1465. return skb->data + offset;
  1466. }
  1467. static inline void *skb_gro_mac_header(struct sk_buff *skb)
  1468. {
  1469. return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
  1470. }
  1471. static inline void *skb_gro_network_header(struct sk_buff *skb)
  1472. {
  1473. return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
  1474. skb_network_offset(skb);
  1475. }
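/*
 * Example (illustrative sketch, not part of the original header): the usual
 * header-access pattern in a gro_receive handler, condensed from the IPv4
 * one.  It assumes <linux/ip.h> is available; flow matching and flush
 * handling are omitted.
 *
 *	static struct sk_buff **foo_gro_receive(struct sk_buff **head,
 *						struct sk_buff *skb)
 *	{
 *		const struct iphdr *iph;
 *		unsigned int off, hlen;
 *
 *		off  = skb_gro_offset(skb);
 *		hlen = off + sizeof(*iph);
 *		iph  = skb_gro_header_fast(skb, off);
 *		if (skb_gro_header_hard(skb, hlen)) {
 *			iph = skb_gro_header_slow(skb, hlen, off);
 *			if (unlikely(!iph))
 *				return head;
 *		}
 *
 *		skb_gro_pull(skb, sizeof(*iph));
 *		(matching of iph against the packets queued on 'head'
 *		 would happen here)
 *		return head;
 *	}
 */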
  1476. static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
  1477. unsigned short type,
  1478. const void *daddr, const void *saddr,
  1479. unsigned len)
  1480. {
  1481. if (!dev->header_ops || !dev->header_ops->create)
  1482. return 0;
  1483. return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
  1484. }
  1485. static inline int dev_parse_header(const struct sk_buff *skb,
  1486. unsigned char *haddr)
  1487. {
  1488. const struct net_device *dev = skb->dev;
  1489. if (!dev->header_ops || !dev->header_ops->parse)
  1490. return 0;
  1491. return dev->header_ops->parse(skb, haddr);
  1492. }
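/*
 * Example (illustrative sketch, not part of the original header): pushing a
 * link-layer header in front of a payload already placed in the skb.  A
 * NULL source address lets the device fill in its own; ETH_P_IP comes from
 * <linux/if_ether.h>.  foo_build_header() is a hypothetical name.
 *
 *	static int foo_build_header(struct sk_buff *skb, struct net_device *dev,
 *				    const unsigned char *dest)
 *	{
 *		return dev_hard_header(skb, dev, ETH_P_IP, dest, NULL, skb->len);
 *	}
 */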
  1493. typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  1494. extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
  1495. static inline int unregister_gifconf(unsigned int family)
  1496. {
  1497. return register_gifconf(family, NULL);
  1498. }
  1499. /*
  1500. * Incoming packets are placed on per-cpu queues
  1501. */
  1502. struct softnet_data {
  1503. struct Qdisc *output_queue;
  1504. struct Qdisc **output_queue_tailp;
  1505. struct list_head poll_list;
  1506. struct sk_buff *completion_queue;
  1507. struct sk_buff_head process_queue;
  1508. /* stats */
  1509. unsigned int processed;
  1510. unsigned int time_squeeze;
  1511. unsigned int cpu_collision;
  1512. unsigned int received_rps;
  1513. #ifdef CONFIG_RPS
  1514. struct softnet_data *rps_ipi_list;
  1515. /* Elements below can be accessed between CPUs for RPS */
  1516. struct call_single_data csd ____cacheline_aligned_in_smp;
  1517. struct softnet_data *rps_ipi_next;
  1518. unsigned int cpu;
  1519. unsigned int input_queue_head;
  1520. unsigned int input_queue_tail;
  1521. #endif
  1522. unsigned dropped;
  1523. struct sk_buff_head input_pkt_queue;
  1524. struct napi_struct backlog;
  1525. };
  1526. static inline void input_queue_head_incr(struct softnet_data *sd)
  1527. {
  1528. #ifdef CONFIG_RPS
  1529. sd->input_queue_head++;
  1530. #endif
  1531. }
  1532. static inline void input_queue_tail_incr_save(struct softnet_data *sd,
  1533. unsigned int *qtail)
  1534. {
  1535. #ifdef CONFIG_RPS
  1536. *qtail = ++sd->input_queue_tail;
  1537. #endif
  1538. }
  1539. DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
  1540. extern void __netif_schedule(struct Qdisc *q);
  1541. static inline void netif_schedule_queue(struct netdev_queue *txq)
  1542. {
  1543. if (!(txq->state & QUEUE_STATE_ANY_XOFF))
  1544. __netif_schedule(txq->qdisc);
  1545. }
  1546. static inline void netif_tx_schedule_all(struct net_device *dev)
  1547. {
  1548. unsigned int i;
  1549. for (i = 0; i < dev->num_tx_queues; i++)
  1550. netif_schedule_queue(netdev_get_tx_queue(dev, i));
  1551. }
  1552. static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
  1553. {
  1554. clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
  1555. }
  1556. /**
  1557. * netif_start_queue - allow transmit
  1558. * @dev: network device
  1559. *
  1560. * Allow upper layers to call the device hard_start_xmit routine.
  1561. */
  1562. static inline void netif_start_queue(struct net_device *dev)
  1563. {
  1564. netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
  1565. }
  1566. static inline void netif_tx_start_all_queues(struct net_device *dev)
  1567. {
  1568. unsigned int i;
  1569. for (i = 0; i < dev->num_tx_queues; i++) {
  1570. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1571. netif_tx_start_queue(txq);
  1572. }
  1573. }
  1574. static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  1575. {
  1576. #ifdef CONFIG_NETPOLL_TRAP
  1577. if (netpoll_trap()) {
  1578. netif_tx_start_queue(dev_queue);
  1579. return;
  1580. }
  1581. #endif
  1582. if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
  1583. __netif_schedule(dev_queue->qdisc);
  1584. }
  1585. /**
  1586. * netif_wake_queue - restart transmit
  1587. * @dev: network device
  1588. *
  1589. * Allow upper layers to call the device hard_start_xmit routine.
  1590. * Used for flow control when transmit resources are available.
  1591. */
  1592. static inline void netif_wake_queue(struct net_device *dev)
  1593. {
  1594. netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
  1595. }
  1596. static inline void netif_tx_wake_all_queues(struct net_device *dev)
  1597. {
  1598. unsigned int i;
  1599. for (i = 0; i < dev->num_tx_queues; i++) {
  1600. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1601. netif_tx_wake_queue(txq);
  1602. }
  1603. }
  1604. static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
  1605. {
  1606. if (WARN_ON(!dev_queue)) {
  1607. pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
  1608. return;
  1609. }
  1610. set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
  1611. }
  1612. /**
1613. * netif_stop_queue - stop the transmit queue
  1614. * @dev: network device
  1615. *
  1616. * Stop upper layers calling the device hard_start_xmit routine.
  1617. * Used for flow control when transmit resources are unavailable.
  1618. */
  1619. static inline void netif_stop_queue(struct net_device *dev)
  1620. {
  1621. netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
  1622. }
  1623. static inline void netif_tx_stop_all_queues(struct net_device *dev)
  1624. {
  1625. unsigned int i;
  1626. for (i = 0; i < dev->num_tx_queues; i++) {
  1627. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1628. netif_tx_stop_queue(txq);
  1629. }
  1630. }
  1631. static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  1632. {
  1633. return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
  1634. }
  1635. /**
1636. * netif_queue_stopped - test if transmit queue is flow-blocked
  1637. * @dev: network device
  1638. *
  1639. * Test if transmit queue on device is currently unable to send.
  1640. */
  1641. static inline int netif_queue_stopped(const struct net_device *dev)
  1642. {
  1643. return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
  1644. }
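/*
 * Example (illustrative sketch, not part of the original header): the
 * classic single-queue flow-control pattern.  struct foo_priv and the
 * foo_*_ring() helpers are hypothetical.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_post_to_ring(priv, skb);
 *		if (foo_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static void foo_tx_complete(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_reclaim_ring(priv);
 *		if (netif_queue_stopped(dev) && foo_ring_has_room(priv))
 *			netif_wake_queue(dev);
 *	}
 */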
  1645. static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
  1646. {
  1647. return dev_queue->state & QUEUE_STATE_ANY_XOFF;
  1648. }
  1649. static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
  1650. {
  1651. return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
  1652. }
  1653. static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
  1654. unsigned int bytes)
  1655. {
  1656. #ifdef CONFIG_BQL
  1657. dql_queued(&dev_queue->dql, bytes);
  1658. if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
  1659. set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
  1660. if (unlikely(dql_avail(&dev_queue->dql) >= 0))
  1661. clear_bit(__QUEUE_STATE_STACK_XOFF,
  1662. &dev_queue->state);
  1663. }
  1664. #endif
  1665. }
  1666. static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
  1667. {
  1668. netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
  1669. }
  1670. static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
  1671. unsigned pkts, unsigned bytes)
  1672. {
  1673. #ifdef CONFIG_BQL
  1674. if (likely(bytes)) {
  1675. dql_completed(&dev_queue->dql, bytes);
  1676. if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
  1677. &dev_queue->state) &&
  1678. dql_avail(&dev_queue->dql) >= 0)) {
  1679. if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
  1680. &dev_queue->state))
  1681. netif_schedule_queue(dev_queue);
  1682. }
  1683. }
  1684. #endif
  1685. }
  1686. static inline void netdev_completed_queue(struct net_device *dev,
  1687. unsigned pkts, unsigned bytes)
  1688. {
  1689. netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
  1690. }
  1691. static inline void netdev_tx_reset_queue(struct netdev_queue *q)
  1692. {
  1693. #ifdef CONFIG_BQL
  1694. dql_reset(&q->dql);
  1695. #endif
  1696. }
  1697. static inline void netdev_reset_queue(struct net_device *dev_queue)
  1698. {
  1699. netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
  1700. }
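/*
 * Example (illustrative sketch, not part of the original header): how a
 * BQL-aware driver reports queue occupancy.  txq is the queue the skb was
 * mapped to; pkts/bytes are accumulated by the (hypothetical) ring-cleaning
 * code.
 *
 *	In ndo_start_xmit(), after the frame has been posted to the ring:
 *		netdev_tx_sent_queue(txq, skb->len);
 *
 *	In the TX completion path, after reclaiming descriptors:
 *		netdev_tx_completed_queue(txq, pkts, bytes);
 *
 *	When the ring is flushed (ifdown, reset):
 *		netdev_tx_reset_queue(txq);
 *
 *	Single-queue drivers can use the netdev_sent_queue() /
 *	netdev_completed_queue() / netdev_reset_queue() wrappers instead.
 */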
  1701. /**
  1702. * netif_running - test if up
  1703. * @dev: network device
  1704. *
  1705. * Test if the device has been brought up.
  1706. */
  1707. static inline int netif_running(const struct net_device *dev)
  1708. {
  1709. return test_bit(__LINK_STATE_START, &dev->state);
  1710. }
  1711. /*
1712. * Routines to manage the subqueues on a device. We only need start,
1713. * stop, and a check if it's stopped. All other device management is
1714. * done at the overall netdevice level.
1715. * There is also a test for whether the device is multiqueue.
  1716. */
  1717. /**
  1718. * netif_start_subqueue - allow sending packets on subqueue
  1719. * @dev: network device
  1720. * @queue_index: sub queue index
  1721. *
  1722. * Start individual transmit queue of a device with multiple transmit queues.
  1723. */
  1724. static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  1725. {
  1726. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1727. netif_tx_start_queue(txq);
  1728. }
  1729. /**
  1730. * netif_stop_subqueue - stop sending packets on subqueue
  1731. * @dev: network device
  1732. * @queue_index: sub queue index
  1733. *
  1734. * Stop individual transmit queue of a device with multiple transmit queues.
  1735. */
  1736. static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  1737. {
  1738. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1739. #ifdef CONFIG_NETPOLL_TRAP
  1740. if (netpoll_trap())
  1741. return;
  1742. #endif
  1743. netif_tx_stop_queue(txq);
  1744. }
  1745. /**
  1746. * netif_subqueue_stopped - test status of subqueue
  1747. * @dev: network device
  1748. * @queue_index: sub queue index
  1749. *
  1750. * Check individual transmit queue of a device with multiple transmit queues.
  1751. */
  1752. static inline int __netif_subqueue_stopped(const struct net_device *dev,
  1753. u16 queue_index)
  1754. {
  1755. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1756. return netif_tx_queue_stopped(txq);
  1757. }
  1758. static inline int netif_subqueue_stopped(const struct net_device *dev,
  1759. struct sk_buff *skb)
  1760. {
  1761. return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
  1762. }
  1763. /**
  1764. * netif_wake_subqueue - allow sending packets on subqueue
  1765. * @dev: network device
  1766. * @queue_index: sub queue index
  1767. *
  1768. * Resume individual transmit queue of a device with multiple transmit queues.
  1769. */
  1770. static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  1771. {
  1772. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1773. #ifdef CONFIG_NETPOLL_TRAP
  1774. if (netpoll_trap())
  1775. return;
  1776. #endif
  1777. if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
  1778. __netif_schedule(txq->qdisc);
  1779. }
  1780. /*
  1781. * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
  1782. * as a distribution range limit for the returned value.
  1783. */
  1784. static inline u16 skb_tx_hash(const struct net_device *dev,
  1785. const struct sk_buff *skb)
  1786. {
  1787. return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
  1788. }
  1789. /**
  1790. * netif_is_multiqueue - test if device has multiple transmit queues
  1791. * @dev: network device
  1792. *
  1793. * Check if device has multiple transmit queues
  1794. */
  1795. static inline int netif_is_multiqueue(const struct net_device *dev)
  1796. {
  1797. return dev->num_tx_queues > 1;
  1798. }
  1799. extern int netif_set_real_num_tx_queues(struct net_device *dev,
  1800. unsigned int txq);
  1801. #ifdef CONFIG_RPS
  1802. extern int netif_set_real_num_rx_queues(struct net_device *dev,
  1803. unsigned int rxq);
  1804. #else
  1805. static inline int netif_set_real_num_rx_queues(struct net_device *dev,
  1806. unsigned int rxq)
  1807. {
  1808. return 0;
  1809. }
  1810. #endif
  1811. static inline int netif_copy_real_num_queues(struct net_device *to_dev,
  1812. const struct net_device *from_dev)
  1813. {
  1814. netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
  1815. #ifdef CONFIG_RPS
  1816. return netif_set_real_num_rx_queues(to_dev,
  1817. from_dev->real_num_rx_queues);
  1818. #else
  1819. return 0;
  1820. #endif
  1821. }
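/*
 * Example (illustrative sketch, not part of the original header): a driver
 * that allocated the maximum number of queues with alloc_netdev_mq() can
 * later shrink the number actually used, e.g. to the number of MSI-X
 * vectors it obtained.  foo_set_queues() and nvec are hypothetical.
 *
 *	static int foo_set_queues(struct net_device *dev, unsigned int nvec)
 *	{
 *		int err;
 *
 *		err = netif_set_real_num_tx_queues(dev, nvec);
 *		if (err)
 *			return err;
 *		return netif_set_real_num_rx_queues(dev, nvec);
 *	}
 */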
  1822. /* Use this variant when it is known for sure that it
  1823. * is executing from hardware interrupt context or with hardware interrupts
  1824. * disabled.
  1825. */
  1826. extern void dev_kfree_skb_irq(struct sk_buff *skb);
  1827. /* Use this variant in places where it could be invoked
  1828. * from either hardware interrupt or other context, with hardware interrupts
  1829. * either disabled or enabled.
  1830. */
  1831. extern void dev_kfree_skb_any(struct sk_buff *skb);
  1832. extern int netif_rx(struct sk_buff *skb);
  1833. extern int netif_rx_ni(struct sk_buff *skb);
  1834. extern int netif_receive_skb(struct sk_buff *skb);
  1835. extern gro_result_t dev_gro_receive(struct napi_struct *napi,
  1836. struct sk_buff *skb);
  1837. extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
  1838. extern gro_result_t napi_gro_receive(struct napi_struct *napi,
  1839. struct sk_buff *skb);
  1840. extern void napi_gro_flush(struct napi_struct *napi);
  1841. extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
  1842. extern gro_result_t napi_frags_finish(struct napi_struct *napi,
  1843. struct sk_buff *skb,
  1844. gro_result_t ret);
  1845. extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
  1846. extern gro_result_t napi_gro_frags(struct napi_struct *napi);
  1847. static inline void napi_free_frags(struct napi_struct *napi)
  1848. {
  1849. kfree_skb(napi->skb);
  1850. napi->skb = NULL;
  1851. }
  1852. extern int netdev_rx_handler_register(struct net_device *dev,
  1853. rx_handler_func_t *rx_handler,
  1854. void *rx_handler_data);
  1855. extern void netdev_rx_handler_unregister(struct net_device *dev);
  1856. extern int dev_valid_name(const char *name);
  1857. extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
  1858. extern int dev_ethtool(struct net *net, struct ifreq *);
  1859. extern unsigned dev_get_flags(const struct net_device *);
  1860. extern int __dev_change_flags(struct net_device *, unsigned int flags);
  1861. extern int dev_change_flags(struct net_device *, unsigned);
  1862. extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
  1863. extern int dev_change_name(struct net_device *, const char *);
  1864. extern int dev_set_alias(struct net_device *, const char *, size_t);
  1865. extern int dev_change_net_namespace(struct net_device *,
  1866. struct net *, const char *);
  1867. extern int dev_set_mtu(struct net_device *, int);
  1868. extern void dev_set_group(struct net_device *, int);
  1869. extern int dev_set_mac_address(struct net_device *,
  1870. struct sockaddr *);
  1871. extern int dev_hard_start_xmit(struct sk_buff *skb,
  1872. struct net_device *dev,
  1873. struct netdev_queue *txq);
  1874. extern int dev_forward_skb(struct net_device *dev,
  1875. struct sk_buff *skb);
  1876. extern int netdev_budget;
  1877. /* Called by rtnetlink.c:rtnl_unlock() */
  1878. extern void netdev_run_todo(void);
  1879. /**
  1880. * dev_put - release reference to device
  1881. * @dev: network device
  1882. *
  1883. * Release reference to device to allow it to be freed.
  1884. */
  1885. static inline void dev_put(struct net_device *dev)
  1886. {
  1887. this_cpu_dec(*dev->pcpu_refcnt);
  1888. }
  1889. /**
  1890. * dev_hold - get reference to device
  1891. * @dev: network device
  1892. *
  1893. * Hold reference to device to keep it from being freed.
  1894. */
  1895. static inline void dev_hold(struct net_device *dev)
  1896. {
  1897. this_cpu_inc(*dev->pcpu_refcnt);
  1898. }
  1899. /* Carrier loss detection, dial on demand. The functions netif_carrier_on
1900. * and _off may be called from IRQ context, but it is the caller
1901. * who is responsible for serialization of these calls.
1902. *
1903. * The name 'carrier' is inappropriate; these functions should really be
1904. * called netif_lowerlayer_*() because they represent the state of any
1905. * kind of lower layer, not just hardware media.
  1906. */
  1907. extern void linkwatch_fire_event(struct net_device *dev);
  1908. extern void linkwatch_forget_dev(struct net_device *dev);
  1909. /**
  1910. * netif_carrier_ok - test if carrier present
  1911. * @dev: network device
  1912. *
  1913. * Check if carrier is present on device
  1914. */
  1915. static inline int netif_carrier_ok(const struct net_device *dev)
  1916. {
  1917. return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
  1918. }
  1919. extern unsigned long dev_trans_start(struct net_device *dev);
  1920. extern void __netdev_watchdog_up(struct net_device *dev);
  1921. extern void netif_carrier_on(struct net_device *dev);
  1922. extern void netif_carrier_off(struct net_device *dev);
  1923. extern void netif_notify_peers(struct net_device *dev);
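/*
 * Example (illustrative sketch, not part of the original header): reacting
 * to a link-state change.  foo_phy_link_up() is a hypothetical helper that
 * reads the PHY/MAC link status.
 *
 *	static void foo_link_change(struct net_device *dev)
 *	{
 *		if (foo_phy_link_up(dev)) {
 *			netif_carrier_on(dev);
 *			netif_wake_queue(dev);
 *		} else {
 *			netif_carrier_off(dev);
 *			netif_stop_queue(dev);
 *		}
 *	}
 */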
  1924. /**
  1925. * netif_dormant_on - mark device as dormant.
  1926. * @dev: network device
  1927. *
  1928. * Mark device as dormant (as per RFC2863).
  1929. *
  1930. * The dormant state indicates that the relevant interface is not
  1931. * actually in a condition to pass packets (i.e., it is not 'up') but is
  1932. * in a "pending" state, waiting for some external event. For "on-
  1933. * demand" interfaces, this new state identifies the situation where the
  1934. * interface is waiting for events to place it in the up state.
  1935. *
  1936. */
  1937. static inline void netif_dormant_on(struct net_device *dev)
  1938. {
  1939. if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
  1940. linkwatch_fire_event(dev);
  1941. }
  1942. /**
  1943. * netif_dormant_off - set device as not dormant.
  1944. * @dev: network device
  1945. *
  1946. * Device is not in dormant state.
  1947. */
  1948. static inline void netif_dormant_off(struct net_device *dev)
  1949. {
  1950. if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
  1951. linkwatch_fire_event(dev);
  1952. }
  1953. /**
1954. * netif_dormant - test if device is dormant
1955. * @dev: network device
1956. *
1957. * Check if the device is in the dormant state (see RFC 2863).
  1958. */
  1959. static inline int netif_dormant(const struct net_device *dev)
  1960. {
  1961. return test_bit(__LINK_STATE_DORMANT, &dev->state);
  1962. }
  1963. /**
  1964. * netif_oper_up - test if device is operational
  1965. * @dev: network device
  1966. *
1967. * Check if the operational state of the device is up
  1968. */
  1969. static inline int netif_oper_up(const struct net_device *dev)
  1970. {
  1971. return (dev->operstate == IF_OPER_UP ||
  1972. dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
  1973. }
  1974. /**
  1975. * netif_device_present - is device available or removed
  1976. * @dev: network device
  1977. *
  1978. * Check if device has not been removed from system.
  1979. */
  1980. static inline int netif_device_present(struct net_device *dev)
  1981. {
  1982. return test_bit(__LINK_STATE_PRESENT, &dev->state);
  1983. }
  1984. extern void netif_device_detach(struct net_device *dev);
  1985. extern void netif_device_attach(struct net_device *dev);
  1986. /*
  1987. * Network interface message level settings
  1988. */
  1989. enum {
  1990. NETIF_MSG_DRV = 0x0001,
  1991. NETIF_MSG_PROBE = 0x0002,
  1992. NETIF_MSG_LINK = 0x0004,
  1993. NETIF_MSG_TIMER = 0x0008,
  1994. NETIF_MSG_IFDOWN = 0x0010,
  1995. NETIF_MSG_IFUP = 0x0020,
  1996. NETIF_MSG_RX_ERR = 0x0040,
  1997. NETIF_MSG_TX_ERR = 0x0080,
  1998. NETIF_MSG_TX_QUEUED = 0x0100,
  1999. NETIF_MSG_INTR = 0x0200,
  2000. NETIF_MSG_TX_DONE = 0x0400,
  2001. NETIF_MSG_RX_STATUS = 0x0800,
  2002. NETIF_MSG_PKTDATA = 0x1000,
  2003. NETIF_MSG_HW = 0x2000,
  2004. NETIF_MSG_WOL = 0x4000,
  2005. };
  2006. #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
  2007. #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
  2008. #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
  2009. #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
  2010. #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
  2011. #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
  2012. #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
  2013. #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
  2014. #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
  2015. #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
  2016. #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
  2017. #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
  2018. #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
  2019. #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
  2020. #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
  2021. static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
  2022. {
  2023. /* use default */
  2024. if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
  2025. return default_msg_enable_bits;
  2026. if (debug_value == 0) /* no output */
  2027. return 0;
  2028. /* set low N bits */
  2029. return (1 << debug_value) - 1;
  2030. }
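/*
 * Example (illustrative sketch, not part of the original header): typical
 * use of netif_msg_init().  It assumes a hypothetical struct foo_priv with
 * a u32 msg_enable field; 'debug' would normally be a module parameter and
 * -1 selects the driver defaults.
 *
 *	static int debug = -1;
 *
 *	static void foo_init_msg_level(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		priv->msg_enable = netif_msg_init(debug,
 *						  NETIF_MSG_DRV |
 *						  NETIF_MSG_PROBE |
 *						  NETIF_MSG_LINK);
 *		if (netif_msg_drv(priv))
 *			netdev_info(dev, "message level 0x%x\n", priv->msg_enable);
 *	}
 */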
  2031. static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
  2032. {
  2033. spin_lock(&txq->_xmit_lock);
  2034. txq->xmit_lock_owner = cpu;
  2035. }
  2036. static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  2037. {
  2038. spin_lock_bh(&txq->_xmit_lock);
  2039. txq->xmit_lock_owner = smp_processor_id();
  2040. }
  2041. static inline int __netif_tx_trylock(struct netdev_queue *txq)
  2042. {
  2043. int ok = spin_trylock(&txq->_xmit_lock);
  2044. if (likely(ok))
  2045. txq->xmit_lock_owner = smp_processor_id();
  2046. return ok;
  2047. }
  2048. static inline void __netif_tx_unlock(struct netdev_queue *txq)
  2049. {
  2050. txq->xmit_lock_owner = -1;
  2051. spin_unlock(&txq->_xmit_lock);
  2052. }
  2053. static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
  2054. {
  2055. txq->xmit_lock_owner = -1;
  2056. spin_unlock_bh(&txq->_xmit_lock);
  2057. }
  2058. static inline void txq_trans_update(struct netdev_queue *txq)
  2059. {
  2060. if (txq->xmit_lock_owner != -1)
  2061. txq->trans_start = jiffies;
  2062. }
  2063. /**
  2064. * netif_tx_lock - grab network device transmit lock
  2065. * @dev: network device
  2066. *
  2067. * Get network device transmit lock
  2068. */
  2069. static inline void netif_tx_lock(struct net_device *dev)
  2070. {
  2071. unsigned int i;
  2072. int cpu;
  2073. spin_lock(&dev->tx_global_lock);
  2074. cpu = smp_processor_id();
  2075. for (i = 0; i < dev->num_tx_queues; i++) {
  2076. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2077. /* We are the only thread of execution doing a
  2078. * freeze, but we have to grab the _xmit_lock in
  2079. * order to synchronize with threads which are in
  2080. * the ->hard_start_xmit() handler and already
  2081. * checked the frozen bit.
  2082. */
  2083. __netif_tx_lock(txq, cpu);
  2084. set_bit(__QUEUE_STATE_FROZEN, &txq->state);
  2085. __netif_tx_unlock(txq);
  2086. }
  2087. }
  2088. static inline void netif_tx_lock_bh(struct net_device *dev)
  2089. {
  2090. local_bh_disable();
  2091. netif_tx_lock(dev);
  2092. }
  2093. static inline void netif_tx_unlock(struct net_device *dev)
  2094. {
  2095. unsigned int i;
  2096. for (i = 0; i < dev->num_tx_queues; i++) {
  2097. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2098. /* No need to grab the _xmit_lock here. If the
  2099. * queue is not stopped for another reason, we
  2100. * force a schedule.
  2101. */
  2102. clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
  2103. netif_schedule_queue(txq);
  2104. }
  2105. spin_unlock(&dev->tx_global_lock);
  2106. }
  2107. static inline void netif_tx_unlock_bh(struct net_device *dev)
  2108. {
  2109. netif_tx_unlock(dev);
  2110. local_bh_enable();
  2111. }
  2112. #define HARD_TX_LOCK(dev, txq, cpu) { \
  2113. if ((dev->features & NETIF_F_LLTX) == 0) { \
  2114. __netif_tx_lock(txq, cpu); \
  2115. } \
  2116. }
  2117. #define HARD_TX_UNLOCK(dev, txq) { \
  2118. if ((dev->features & NETIF_F_LLTX) == 0) { \
  2119. __netif_tx_unlock(txq); \
  2120. } \
  2121. }
  2122. static inline void netif_tx_disable(struct net_device *dev)
  2123. {
  2124. unsigned int i;
  2125. int cpu;
  2126. local_bh_disable();
  2127. cpu = smp_processor_id();
  2128. for (i = 0; i < dev->num_tx_queues; i++) {
  2129. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2130. __netif_tx_lock(txq, cpu);
  2131. netif_tx_stop_queue(txq);
  2132. __netif_tx_unlock(txq);
  2133. }
  2134. local_bh_enable();
  2135. }
  2136. static inline void netif_addr_lock(struct net_device *dev)
  2137. {
  2138. spin_lock(&dev->addr_list_lock);
  2139. }
  2140. static inline void netif_addr_lock_nested(struct net_device *dev)
  2141. {
  2142. spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
  2143. }
  2144. static inline void netif_addr_lock_bh(struct net_device *dev)
  2145. {
  2146. spin_lock_bh(&dev->addr_list_lock);
  2147. }
  2148. static inline void netif_addr_unlock(struct net_device *dev)
  2149. {
  2150. spin_unlock(&dev->addr_list_lock);
  2151. }
  2152. static inline void netif_addr_unlock_bh(struct net_device *dev)
  2153. {
  2154. spin_unlock_bh(&dev->addr_list_lock);
  2155. }
  2156. /*
  2157. * dev_addrs walker. Should be used only for read access. Call with
  2158. * rcu_read_lock held.
  2159. */
  2160. #define for_each_dev_addr(dev, ha) \
  2161. list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
  2162. /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  2163. extern void ether_setup(struct net_device *dev);
  2164. /* Support for loadable net-drivers */
  2165. extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
  2166. void (*setup)(struct net_device *),
  2167. unsigned int txqs, unsigned int rxqs);
  2168. #define alloc_netdev(sizeof_priv, name, setup) \
  2169. alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
  2170. #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
  2171. alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
  2172. extern int register_netdev(struct net_device *dev);
  2173. extern void unregister_netdev(struct net_device *dev);
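/*
 * Example (illustrative sketch, not part of the original header): minimal
 * allocate/register/teardown sequence for an Ethernet-like device.
 * struct foo_priv and foo_netdev_ops are hypothetical.
 *
 *	static struct net_device *foo_create(void)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *		if (!dev)
 *			return NULL;
 *
 *		dev->netdev_ops = &foo_netdev_ops;
 *
 *		err = register_netdev(dev);
 *		if (err) {
 *			free_netdev(dev);
 *			return NULL;
 *		}
 *		return dev;
 *	}
 *
 *	Teardown is the reverse: unregister_netdev(dev) followed by
 *	free_netdev(dev).
 */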
  2174. /* General hardware address lists handling functions */
  2175. extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
  2176. struct netdev_hw_addr_list *from_list,
  2177. int addr_len, unsigned char addr_type);
  2178. extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
  2179. struct netdev_hw_addr_list *from_list,
  2180. int addr_len, unsigned char addr_type);
  2181. extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
  2182. struct netdev_hw_addr_list *from_list,
  2183. int addr_len);
  2184. extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
  2185. struct netdev_hw_addr_list *from_list,
  2186. int addr_len);
  2187. extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
  2188. extern void __hw_addr_init(struct netdev_hw_addr_list *list);
  2189. /* Functions used for device addresses handling */
  2190. extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
  2191. unsigned char addr_type);
  2192. extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
  2193. unsigned char addr_type);
  2194. extern int dev_addr_add_multiple(struct net_device *to_dev,
  2195. struct net_device *from_dev,
  2196. unsigned char addr_type);
  2197. extern int dev_addr_del_multiple(struct net_device *to_dev,
  2198. struct net_device *from_dev,
  2199. unsigned char addr_type);
  2200. extern void dev_addr_flush(struct net_device *dev);
  2201. extern int dev_addr_init(struct net_device *dev);
  2202. /* Functions used for unicast addresses handling */
  2203. extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
  2204. extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
  2205. extern int dev_uc_sync(struct net_device *to, struct net_device *from);
  2206. extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
  2207. extern void dev_uc_flush(struct net_device *dev);
  2208. extern void dev_uc_init(struct net_device *dev);
  2209. /* Functions used for multicast addresses handling */
  2210. extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
  2211. extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
  2212. extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
  2213. extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
  2214. extern int dev_mc_sync(struct net_device *to, struct net_device *from);
  2215. extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
  2216. extern void dev_mc_flush(struct net_device *dev);
  2217. extern void dev_mc_init(struct net_device *dev);
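/*
 * Example (illustrative sketch, not part of the original header): a stacked
 * device (VLAN-style) typically forwards its address lists to the
 * underlying real device from its ndo_set_rx_mode() callback.
 * foo_get_real_dev() is a hypothetical accessor.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *real_dev = foo_get_real_dev(dev);
 *
 *		dev_uc_sync(real_dev, dev);
 *		dev_mc_sync(real_dev, dev);
 *	}
 */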
  2218. /* Functions used for secondary unicast and multicast support */
  2219. extern void dev_set_rx_mode(struct net_device *dev);
  2220. extern void __dev_set_rx_mode(struct net_device *dev);
  2221. extern int dev_set_promiscuity(struct net_device *dev, int inc);
  2222. extern int dev_set_allmulti(struct net_device *dev, int inc);
  2223. extern void netdev_state_change(struct net_device *dev);
  2224. extern int netdev_bonding_change(struct net_device *dev,
  2225. unsigned long event);
  2226. extern void netdev_features_change(struct net_device *dev);
  2227. /* Load a device via the kmod */
  2228. extern void dev_load(struct net *net, const char *name);
  2229. extern void dev_mcast_init(void);
  2230. extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  2231. struct rtnl_link_stats64 *storage);
  2232. extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
  2233. const struct net_device_stats *netdev_stats);
  2234. extern int netdev_max_backlog;
  2235. extern int netdev_tstamp_prequeue;
  2236. extern int weight_p;
  2237. extern int bpf_jit_enable;
  2238. extern int netdev_set_master(struct net_device *dev, struct net_device *master);
  2239. extern int netdev_set_bond_master(struct net_device *dev,
  2240. struct net_device *master);
  2241. extern int skb_checksum_help(struct sk_buff *skb);
  2242. extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
  2243. netdev_features_t features);
  2244. #ifdef CONFIG_BUG
  2245. extern void netdev_rx_csum_fault(struct net_device *dev);
  2246. #else
  2247. static inline void netdev_rx_csum_fault(struct net_device *dev)
  2248. {
  2249. }
  2250. #endif
  2251. /* rx skb timestamps */
  2252. extern void net_enable_timestamp(void);
  2253. extern void net_disable_timestamp(void);
  2254. #ifdef CONFIG_PROC_FS
  2255. extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
  2256. extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
  2257. extern void dev_seq_stop(struct seq_file *seq, void *v);
  2258. extern int dev_seq_open_ops(struct inode *inode, struct file *file,
  2259. const struct seq_operations *ops);
  2260. #endif
  2261. extern int netdev_class_create_file(struct class_attribute *class_attr);
  2262. extern void netdev_class_remove_file(struct class_attribute *class_attr);
  2263. extern struct kobj_ns_type_operations net_ns_type_operations;
  2264. extern const char *netdev_drivername(const struct net_device *dev);
  2265. extern void linkwatch_run_queue(void);
  2266. static inline netdev_features_t netdev_get_wanted_features(
  2267. struct net_device *dev)
  2268. {
  2269. return (dev->features & ~dev->hw_features) | dev->wanted_features;
  2270. }
  2271. netdev_features_t netdev_increment_features(netdev_features_t all,
  2272. netdev_features_t one, netdev_features_t mask);
  2273. int __netdev_update_features(struct net_device *dev);
  2274. void netdev_update_features(struct net_device *dev);
  2275. void netdev_change_features(struct net_device *dev);
  2276. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  2277. struct net_device *dev);
  2278. netdev_features_t netif_skb_features(struct sk_buff *skb);
  2279. static inline int net_gso_ok(netdev_features_t features, int gso_type)
  2280. {
  2281. netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
  2282. /* check flags correspondence */
  2283. BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
  2284. BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
  2285. BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
  2286. BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
  2287. BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
  2288. BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
  2289. return (features & feature) == feature;
  2290. }
  2291. static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
  2292. {
  2293. return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
  2294. (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
  2295. }
  2296. static inline int netif_needs_gso(struct sk_buff *skb,
  2297. netdev_features_t features)
  2298. {
  2299. return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
  2300. unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
  2301. }
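/*
 * Example (illustrative sketch, not part of the original header): roughly
 * what the core transmit path does with these helpers before handing an skb
 * to a driver (error handling and the transmission of the resulting
 * segments are omitted).
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *		if (!IS_ERR(segs) && segs)
 *			(each segment on the 'segs' list is then
 *			 transmitted individually)
 *	}
 */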
  2302. static inline void netif_set_gso_max_size(struct net_device *dev,
  2303. unsigned int size)
  2304. {
  2305. dev->gso_max_size = size;
  2306. }
  2307. static inline int netif_is_bond_slave(struct net_device *dev)
  2308. {
  2309. return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
  2310. }
  2311. static inline bool netif_supports_nofcs(struct net_device *dev)
  2312. {
  2313. return dev->priv_flags & IFF_SUPP_NOFCS;
  2314. }
  2315. extern struct pernet_operations __net_initdata loopback_net_ops;
  2316. /* Logging, debugging and troubleshooting/diagnostic helpers. */
  2317. /* netdev_printk helpers, similar to dev_printk */
  2318. static inline const char *netdev_name(const struct net_device *dev)
  2319. {
  2320. if (dev->reg_state != NETREG_REGISTERED)
  2321. return "(unregistered net_device)";
  2322. return dev->name;
  2323. }
  2324. extern int __netdev_printk(const char *level, const struct net_device *dev,
  2325. struct va_format *vaf);
  2326. extern __printf(3, 4)
  2327. int netdev_printk(const char *level, const struct net_device *dev,
  2328. const char *format, ...);
  2329. extern __printf(2, 3)
  2330. int netdev_emerg(const struct net_device *dev, const char *format, ...);
  2331. extern __printf(2, 3)
  2332. int netdev_alert(const struct net_device *dev, const char *format, ...);
  2333. extern __printf(2, 3)
  2334. int netdev_crit(const struct net_device *dev, const char *format, ...);
  2335. extern __printf(2, 3)
  2336. int netdev_err(const struct net_device *dev, const char *format, ...);
  2337. extern __printf(2, 3)
  2338. int netdev_warn(const struct net_device *dev, const char *format, ...);
  2339. extern __printf(2, 3)
  2340. int netdev_notice(const struct net_device *dev, const char *format, ...);
  2341. extern __printf(2, 3)
  2342. int netdev_info(const struct net_device *dev, const char *format, ...);
  2343. #define MODULE_ALIAS_NETDEV(device) \
  2344. MODULE_ALIAS("netdev-" device)
  2345. #if defined(DEBUG)
  2346. #define netdev_dbg(__dev, format, args...) \
  2347. netdev_printk(KERN_DEBUG, __dev, format, ##args)
  2348. #elif defined(CONFIG_DYNAMIC_DEBUG)
  2349. #define netdev_dbg(__dev, format, args...) \
  2350. do { \
  2351. dynamic_netdev_dbg(__dev, format, ##args); \
  2352. } while (0)
  2353. #else
  2354. #define netdev_dbg(__dev, format, args...) \
  2355. ({ \
  2356. if (0) \
  2357. netdev_printk(KERN_DEBUG, __dev, format, ##args); \
  2358. 0; \
  2359. })
  2360. #endif
  2361. #if defined(VERBOSE_DEBUG)
  2362. #define netdev_vdbg netdev_dbg
  2363. #else
  2364. #define netdev_vdbg(dev, format, args...) \
  2365. ({ \
  2366. if (0) \
  2367. netdev_printk(KERN_DEBUG, dev, format, ##args); \
  2368. 0; \
  2369. })
  2370. #endif
  2371. /*
  2372. * netdev_WARN() acts like dev_printk(), but with the key difference
  2373. * of using a WARN/WARN_ON to get the message out, including the
  2374. * file/line information and a backtrace.
  2375. */
  2376. #define netdev_WARN(dev, format, args...) \
2377. WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
  2378. /* netif printk helpers, similar to netdev_printk */
  2379. #define netif_printk(priv, type, level, dev, fmt, args...) \
  2380. do { \
  2381. if (netif_msg_##type(priv)) \
  2382. netdev_printk(level, (dev), fmt, ##args); \
  2383. } while (0)
  2384. #define netif_level(level, priv, type, dev, fmt, args...) \
  2385. do { \
  2386. if (netif_msg_##type(priv)) \
  2387. netdev_##level(dev, fmt, ##args); \
  2388. } while (0)
  2389. #define netif_emerg(priv, type, dev, fmt, args...) \
  2390. netif_level(emerg, priv, type, dev, fmt, ##args)
  2391. #define netif_alert(priv, type, dev, fmt, args...) \
  2392. netif_level(alert, priv, type, dev, fmt, ##args)
  2393. #define netif_crit(priv, type, dev, fmt, args...) \
  2394. netif_level(crit, priv, type, dev, fmt, ##args)
  2395. #define netif_err(priv, type, dev, fmt, args...) \
  2396. netif_level(err, priv, type, dev, fmt, ##args)
  2397. #define netif_warn(priv, type, dev, fmt, args...) \
  2398. netif_level(warn, priv, type, dev, fmt, ##args)
  2399. #define netif_notice(priv, type, dev, fmt, args...) \
  2400. netif_level(notice, priv, type, dev, fmt, ##args)
  2401. #define netif_info(priv, type, dev, fmt, args...) \
  2402. netif_level(info, priv, type, dev, fmt, ##args)
  2403. #if defined(DEBUG)
  2404. #define netif_dbg(priv, type, dev, format, args...) \
  2405. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
  2406. #elif defined(CONFIG_DYNAMIC_DEBUG)
  2407. #define netif_dbg(priv, type, netdev, format, args...) \
  2408. do { \
  2409. if (netif_msg_##type(priv)) \
  2410. dynamic_netdev_dbg(netdev, format, ##args); \
  2411. } while (0)
  2412. #else
  2413. #define netif_dbg(priv, type, dev, format, args...) \
  2414. ({ \
  2415. if (0) \
  2416. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
  2417. 0; \
  2418. })
  2419. #endif
  2420. #if defined(VERBOSE_DEBUG)
  2421. #define netif_vdbg netif_dbg
  2422. #else
  2423. #define netif_vdbg(priv, type, dev, format, args...) \
  2424. ({ \
  2425. if (0) \
  2426. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
  2427. 0; \
  2428. })
  2429. #endif
  2430. #endif /* __KERNEL__ */
  2431. #endif /* _LINUX_NETDEVICE_H */