netdevice.h 80 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the Interfaces handler.
  7. *
  8. * Version: @(#)dev.h 1.0.10 08/12/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  13. * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
  14. * Alan Cox, <alan@lxorguk.ukuu.org.uk>
  15. * Bjorn Ekwall. <bj0rn@blox.se>
  16. * Pekka Riikonen <priikone@poseidon.pspt.fi>
  17. *
  18. * This program is free software; you can redistribute it and/or
  19. * modify it under the terms of the GNU General Public License
  20. * as published by the Free Software Foundation; either version
  21. * 2 of the License, or (at your option) any later version.
  22. *
  23. * Moved to /usr/include/linux for NET3
  24. */
  25. #ifndef _LINUX_NETDEVICE_H
  26. #define _LINUX_NETDEVICE_H
  27. #include <linux/if.h>
  28. #include <linux/if_ether.h>
  29. #include <linux/if_packet.h>
  30. #include <linux/if_link.h>
  31. #ifdef __KERNEL__
  32. #include <linux/pm_qos.h>
  33. #include <linux/timer.h>
  34. #include <linux/delay.h>
  35. #include <linux/atomic.h>
  36. #include <asm/cache.h>
  37. #include <asm/byteorder.h>
  38. #include <linux/device.h>
  39. #include <linux/percpu.h>
  40. #include <linux/rculist.h>
  41. #include <linux/dmaengine.h>
  42. #include <linux/workqueue.h>
  43. #include <linux/ethtool.h>
  44. #include <net/net_namespace.h>
  45. #include <net/dsa.h>
  46. #ifdef CONFIG_DCB
  47. #include <net/dcbnl.h>
  48. #endif
  49. #include <net/netprio_cgroup.h>
  50. #include <linux/netdev_features.h>
  51. struct vlan_group;
  52. struct netpoll_info;
  53. struct phy_device;
  54. /* 802.11 specific */
  55. struct wireless_dev;
  56. /* source back-compat hooks */
  57. #define SET_ETHTOOL_OPS(netdev,ops) \
  58. ( (netdev)->ethtool_ops = (ops) )
  59. /* hardware address assignment types */
  60. #define NET_ADDR_PERM 0 /* address is permanent (default) */
  61. #define NET_ADDR_RANDOM 1 /* address is generated randomly */
  62. #define NET_ADDR_STOLEN 2 /* address is stolen from other device */
  63. /* Backlog congestion levels */
  64. #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
  65. #define NET_RX_DROP 1 /* packet dropped */
  66. /*
  67. * Transmit return codes: transmit return codes originate from three different
  68. * namespaces:
  69. *
  70. * - qdisc return codes
  71. * - driver transmit return codes
  72. * - errno values
  73. *
  74. * Drivers are allowed to return any one of those in their hard_start_xmit()
  75. * function. Real network devices commonly used with qdiscs should only return
  76. * the driver transmit return codes though - when qdiscs are used, the actual
  77. * transmission happens asynchronously, so the value is not propagated to
  78. * higher layers. Virtual network devices transmit synchronously, in this case
  79. * the driver transmit return codes are consumed by dev_queue_xmit(), all
  80. * others are propagated to higher layers.
  81. */
  82. /* qdisc ->enqueue() return codes. */
  83. #define NET_XMIT_SUCCESS 0x00
  84. #define NET_XMIT_DROP 0x01 /* skb dropped */
  85. #define NET_XMIT_CN 0x02 /* congestion notification */
  86. #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
  87. #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
  88. /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  89. * indicates that the device will soon be dropping packets, or already drops
  90. * some packets of the same priority; prompting us to send less aggressively. */
  91. #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
  92. #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
  93. /* Driver transmit return codes */
  94. #define NETDEV_TX_MASK 0xf0
  95. enum netdev_tx {
  96. __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
  97. NETDEV_TX_OK = 0x00, /* driver took care of packet */
  98. NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
  99. NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
  100. };
  101. typedef enum netdev_tx netdev_tx_t;
  102. /*
  103. * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  104. * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  105. */
  106. static inline bool dev_xmit_complete(int rc)
  107. {
  108. /*
  109. * Positive cases with an skb consumed by a driver:
  110. * - successful transmission (rc == NETDEV_TX_OK)
  111. * - error while transmitting (rc < 0)
  112. * - error while queueing to a different device (rc & NET_XMIT_MASK)
  113. */
  114. if (likely(rc < NET_XMIT_MASK))
  115. return true;
  116. return false;
  117. }
  118. #endif
  119. #define MAX_ADDR_LEN 32 /* Largest hardware address length */
  120. /* Initial net device group. All devices belong to group 0 by default. */
  121. #define INIT_NETDEV_GROUP 0
  122. #ifdef __KERNEL__
  123. /*
  124. * Compute the worst case header length according to the protocols
  125. * used.
  126. */
  127. #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
  128. # if defined(CONFIG_MAC80211_MESH)
  129. # define LL_MAX_HEADER 128
  130. # else
  131. # define LL_MAX_HEADER 96
  132. # endif
  133. #elif IS_ENABLED(CONFIG_TR)
  134. # define LL_MAX_HEADER 48
  135. #else
  136. # define LL_MAX_HEADER 32
  137. #endif
  138. #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
  139. !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
  140. #define MAX_HEADER LL_MAX_HEADER
  141. #else
  142. #define MAX_HEADER (LL_MAX_HEADER + 48)
  143. #endif
  144. /*
  145. * Old network device statistics. Fields are native words
  146. * (unsigned long) so they can be read and written atomically.
  147. */
  148. struct net_device_stats {
  149. unsigned long rx_packets;
  150. unsigned long tx_packets;
  151. unsigned long rx_bytes;
  152. unsigned long tx_bytes;
  153. unsigned long rx_errors;
  154. unsigned long tx_errors;
  155. unsigned long rx_dropped;
  156. unsigned long tx_dropped;
  157. unsigned long multicast;
  158. unsigned long collisions;
  159. unsigned long rx_length_errors;
  160. unsigned long rx_over_errors;
  161. unsigned long rx_crc_errors;
  162. unsigned long rx_frame_errors;
  163. unsigned long rx_fifo_errors;
  164. unsigned long rx_missed_errors;
  165. unsigned long tx_aborted_errors;
  166. unsigned long tx_carrier_errors;
  167. unsigned long tx_fifo_errors;
  168. unsigned long tx_heartbeat_errors;
  169. unsigned long tx_window_errors;
  170. unsigned long rx_compressed;
  171. unsigned long tx_compressed;
  172. };
  173. #endif /* __KERNEL__ */
  174. /* Media selection options. */
  175. enum {
  176. IF_PORT_UNKNOWN = 0,
  177. IF_PORT_10BASE2,
  178. IF_PORT_10BASET,
  179. IF_PORT_AUI,
  180. IF_PORT_100BASET,
  181. IF_PORT_100BASETX,
  182. IF_PORT_100BASEFX
  183. };
  184. #ifdef __KERNEL__
  185. #include <linux/cache.h>
  186. #include <linux/skbuff.h>
  187. #ifdef CONFIG_RPS
  188. #include <linux/jump_label.h>
  189. extern struct jump_label_key rps_needed;
  190. #endif
  191. struct neighbour;
  192. struct neigh_parms;
  193. struct sk_buff;
  194. struct netdev_hw_addr {
  195. struct list_head list;
  196. unsigned char addr[MAX_ADDR_LEN];
  197. unsigned char type;
  198. #define NETDEV_HW_ADDR_T_LAN 1
  199. #define NETDEV_HW_ADDR_T_SAN 2
  200. #define NETDEV_HW_ADDR_T_SLAVE 3
  201. #define NETDEV_HW_ADDR_T_UNICAST 4
  202. #define NETDEV_HW_ADDR_T_MULTICAST 5
  203. bool synced;
  204. bool global_use;
  205. int refcount;
  206. struct rcu_head rcu_head;
  207. };
  208. struct netdev_hw_addr_list {
  209. struct list_head list;
  210. int count;
  211. };
  212. #define netdev_hw_addr_list_count(l) ((l)->count)
  213. #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  214. #define netdev_hw_addr_list_for_each(ha, l) \
  215. list_for_each_entry(ha, &(l)->list, list)
  216. #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  217. #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  218. #define netdev_for_each_uc_addr(ha, dev) \
  219. netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  220. #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  221. #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  222. #define netdev_for_each_mc_addr(ha, dev) \
  223. netdev_hw_addr_list_for_each(ha, &(dev)->mc)
  224. struct hh_cache {
  225. u16 hh_len;
  226. u16 __pad;
  227. seqlock_t hh_lock;
  228. /* cached hardware header; allow for machine alignment needs. */
  229. #define HH_DATA_MOD 16
  230. #define HH_DATA_OFF(__len) \
  231. (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  232. #define HH_DATA_ALIGN(__len) \
  233. (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  234. unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  235. };
  236. /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  237. * Alternative is:
  238. * dev->hard_header_len ? (dev->hard_header_len +
  239. * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  240. *
  241. * We could use other alignment values, but we must maintain the
  242. * relationship HH alignment <= LL alignment.
  243. */
  244. #define LL_RESERVED_SPACE(dev) \
  245. ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  246. #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  247. ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  248. struct header_ops {
  249. int (*create) (struct sk_buff *skb, struct net_device *dev,
  250. unsigned short type, const void *daddr,
  251. const void *saddr, unsigned len);
  252. int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
  253. int (*rebuild)(struct sk_buff *skb);
  254. int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  255. void (*cache_update)(struct hh_cache *hh,
  256. const struct net_device *dev,
  257. const unsigned char *haddr);
  258. };
  259. /* These flag bits are private to the generic network queueing
  260. * layer, they may not be explicitly referenced by any other
  261. * code.
  262. */
  263. enum netdev_state_t {
  264. __LINK_STATE_START,
  265. __LINK_STATE_PRESENT,
  266. __LINK_STATE_NOCARRIER,
  267. __LINK_STATE_LINKWATCH_PENDING,
  268. __LINK_STATE_DORMANT,
  269. };
  270. /*
  271. * This structure holds at boot time configured netdevice settings. They
  272. * are then used in the device probing.
  273. */
  274. struct netdev_boot_setup {
  275. char name[IFNAMSIZ];
  276. struct ifmap map;
  277. };
  278. #define NETDEV_BOOT_SETUP_MAX 8
  279. extern int __init netdev_boot_setup(char *str);
  280. /*
  281. * Structure for NAPI scheduling similar to tasklet but with weighting
  282. */
  283. struct napi_struct {
  284. /* The poll_list must only be managed by the entity which
  285. * changes the state of the NAPI_STATE_SCHED bit. This means
  286. * whoever atomically sets that bit can add this napi_struct
  287. * to the per-cpu poll_list, and whoever clears that bit
  288. * can remove from the list right before clearing the bit.
  289. */
  290. struct list_head poll_list;
  291. unsigned long state;
  292. int weight;
  293. int (*poll)(struct napi_struct *, int);
  294. #ifdef CONFIG_NETPOLL
  295. spinlock_t poll_lock;
  296. int poll_owner;
  297. #endif
  298. unsigned int gro_count;
  299. struct net_device *dev;
  300. struct list_head dev_list;
  301. struct sk_buff *gro_list;
  302. struct sk_buff *skb;
  303. };
  304. enum {
  305. NAPI_STATE_SCHED, /* Poll is scheduled */
  306. NAPI_STATE_DISABLE, /* Disable pending */
  307. NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
  308. };
  309. enum gro_result {
  310. GRO_MERGED,
  311. GRO_MERGED_FREE,
  312. GRO_HELD,
  313. GRO_NORMAL,
  314. GRO_DROP,
  315. };
  316. typedef enum gro_result gro_result_t;
  317. /*
  318. * enum rx_handler_result - Possible return values for rx_handlers.
  319. * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  320. * further.
  321. * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  322. * case skb->dev was changed by rx_handler.
  323. * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
  324. * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
  325. *
  326. * rx_handlers are functions called from inside __netif_receive_skb(), to do
  327. * special processing of the skb, prior to delivery to protocol handlers.
  328. *
  329. * Currently, a net_device can only have a single rx_handler registered. Trying
  330. * to register a second rx_handler will return -EBUSY.
  331. *
  332. * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  333. * To unregister a rx_handler on a net_device, use
  334. * netdev_rx_handler_unregister().
  335. *
  336. * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  337. * do with the skb.
  338. *
  339. * If the rx_handler consumed to skb in some way, it should return
  340. * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
  341. * the skb to be delivered in some other ways.
  342. *
  343. * If the rx_handler changed skb->dev, to divert the skb to another
  344. * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  345. * new device will be called if it exists.
  346. *
  347. * If the rx_handler consider the skb should be ignored, it should return
  348. * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
  349. * are registred on exact device (ptype->dev == skb->dev).
  350. *
  351. * If the rx_handler didn't changed skb->dev, but want the skb to be normally
  352. * delivered, it should return RX_HANDLER_PASS.
  353. *
  354. * A device without a registered rx_handler will behave as if rx_handler
  355. * returned RX_HANDLER_PASS.
  356. */
  357. enum rx_handler_result {
  358. RX_HANDLER_CONSUMED,
  359. RX_HANDLER_ANOTHER,
  360. RX_HANDLER_EXACT,
  361. RX_HANDLER_PASS,
  362. };
  363. typedef enum rx_handler_result rx_handler_result_t;
  364. typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
  365. extern void __napi_schedule(struct napi_struct *n);
  366. static inline int napi_disable_pending(struct napi_struct *n)
  367. {
  368. return test_bit(NAPI_STATE_DISABLE, &n->state);
  369. }
  370. /**
  371. * napi_schedule_prep - check if napi can be scheduled
  372. * @n: napi context
  373. *
  374. * Test if NAPI routine is already running, and if not mark
  375. * it as running. This is used as a condition variable
  376. * insure only one NAPI poll instance runs. We also make
  377. * sure there is no pending NAPI disable.
  378. */
  379. static inline int napi_schedule_prep(struct napi_struct *n)
  380. {
  381. return !napi_disable_pending(n) &&
  382. !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  383. }
  384. /**
  385. * napi_schedule - schedule NAPI poll
  386. * @n: napi context
  387. *
  388. * Schedule NAPI poll routine to be called if it is not already
  389. * running.
  390. */
  391. static inline void napi_schedule(struct napi_struct *n)
  392. {
  393. if (napi_schedule_prep(n))
  394. __napi_schedule(n);
  395. }
  396. /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
  397. static inline int napi_reschedule(struct napi_struct *napi)
  398. {
  399. if (napi_schedule_prep(napi)) {
  400. __napi_schedule(napi);
  401. return 1;
  402. }
  403. return 0;
  404. }
  405. /**
  406. * napi_complete - NAPI processing complete
  407. * @n: napi context
  408. *
  409. * Mark NAPI processing as complete.
  410. */
  411. extern void __napi_complete(struct napi_struct *n);
  412. extern void napi_complete(struct napi_struct *n);
  413. /**
  414. * napi_disable - prevent NAPI from scheduling
  415. * @n: napi context
  416. *
  417. * Stop NAPI from being scheduled on this context.
  418. * Waits till any outstanding processing completes.
  419. */
  420. static inline void napi_disable(struct napi_struct *n)
  421. {
  422. set_bit(NAPI_STATE_DISABLE, &n->state);
  423. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  424. msleep(1);
  425. clear_bit(NAPI_STATE_DISABLE, &n->state);
  426. }
  427. /**
  428. * napi_enable - enable NAPI scheduling
  429. * @n: napi context
  430. *
  431. * Resume NAPI from being scheduled on this context.
  432. * Must be paired with napi_disable.
  433. */
  434. static inline void napi_enable(struct napi_struct *n)
  435. {
  436. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  437. smp_mb__before_clear_bit();
  438. clear_bit(NAPI_STATE_SCHED, &n->state);
  439. }
  440. #ifdef CONFIG_SMP
  441. /**
  442. * napi_synchronize - wait until NAPI is not running
  443. * @n: napi context
  444. *
  445. * Wait until NAPI is done being scheduled on this context.
  446. * Waits till any outstanding processing completes but
  447. * does not disable future activations.
  448. */
  449. static inline void napi_synchronize(const struct napi_struct *n)
  450. {
  451. while (test_bit(NAPI_STATE_SCHED, &n->state))
  452. msleep(1);
  453. }
  454. #else
  455. # define napi_synchronize(n) barrier()
  456. #endif
  457. enum netdev_queue_state_t {
  458. __QUEUE_STATE_XOFF,
  459. __QUEUE_STATE_FROZEN,
  460. #define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
  461. (1 << __QUEUE_STATE_FROZEN))
  462. };
  463. struct netdev_queue {
  464. /*
  465. * read mostly part
  466. */
  467. struct net_device *dev;
  468. struct Qdisc *qdisc;
  469. unsigned long state;
  470. struct Qdisc *qdisc_sleeping;
  471. #ifdef CONFIG_SYSFS
  472. struct kobject kobj;
  473. #endif
  474. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  475. int numa_node;
  476. #endif
  477. /*
  478. * write mostly part
  479. */
  480. spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
  481. int xmit_lock_owner;
  482. /*
  483. * please use this field instead of dev->trans_start
  484. */
  485. unsigned long trans_start;
  486. /*
  487. * Number of TX timeouts for this queue
  488. * (/sys/class/net/DEV/Q/trans_timeout)
  489. */
  490. unsigned long trans_timeout;
  491. } ____cacheline_aligned_in_smp;
  492. static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  493. {
  494. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  495. return q->numa_node;
  496. #else
  497. return NUMA_NO_NODE;
  498. #endif
  499. }
  500. static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  501. {
  502. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  503. q->numa_node = node;
  504. #endif
  505. }
  506. #ifdef CONFIG_RPS
  507. /*
  508. * This structure holds an RPS map which can be of variable length. The
  509. * map is an array of CPUs.
  510. */
  511. struct rps_map {
  512. unsigned int len;
  513. struct rcu_head rcu;
  514. u16 cpus[0];
  515. };
  516. #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
  517. /*
  518. * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  519. * tail pointer for that CPU's input queue at the time of last enqueue, and
  520. * a hardware filter index.
  521. */
  522. struct rps_dev_flow {
  523. u16 cpu;
  524. u16 filter;
  525. unsigned int last_qtail;
  526. };
  527. #define RPS_NO_FILTER 0xffff
  528. /*
  529. * The rps_dev_flow_table structure contains a table of flow mappings.
  530. */
  531. struct rps_dev_flow_table {
  532. unsigned int mask;
  533. struct rcu_head rcu;
  534. struct work_struct free_work;
  535. struct rps_dev_flow flows[0];
  536. };
  537. #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  538. (_num * sizeof(struct rps_dev_flow)))
  539. /*
  540. * The rps_sock_flow_table contains mappings of flows to the last CPU
  541. * on which they were processed by the application (set in recvmsg).
  542. */
  543. struct rps_sock_flow_table {
  544. unsigned int mask;
  545. u16 ents[0];
  546. };
  547. #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
  548. (_num * sizeof(u16)))
  549. #define RPS_NO_CPU 0xffff
  550. static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  551. u32 hash)
  552. {
  553. if (table && hash) {
  554. unsigned int cpu, index = hash & table->mask;
  555. /* We only give a hint, preemption can change cpu under us */
  556. cpu = raw_smp_processor_id();
  557. if (table->ents[index] != cpu)
  558. table->ents[index] = cpu;
  559. }
  560. }
  561. static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
  562. u32 hash)
  563. {
  564. if (table && hash)
  565. table->ents[hash & table->mask] = RPS_NO_CPU;
  566. }
  567. extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
  568. #ifdef CONFIG_RFS_ACCEL
  569. extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  570. u32 flow_id, u16 filter_id);
  571. #endif
  572. /* This structure contains an instance of an RX queue. */
  573. struct netdev_rx_queue {
  574. struct rps_map __rcu *rps_map;
  575. struct rps_dev_flow_table __rcu *rps_flow_table;
  576. struct kobject kobj;
  577. struct net_device *dev;
  578. } ____cacheline_aligned_in_smp;
  579. #endif /* CONFIG_RPS */
  580. #ifdef CONFIG_XPS
  581. /*
  582. * This structure holds an XPS map which can be of variable length. The
  583. * map is an array of queues.
  584. */
  585. struct xps_map {
  586. unsigned int len;
  587. unsigned int alloc_len;
  588. struct rcu_head rcu;
  589. u16 queues[0];
  590. };
  591. #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
  592. #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
  593. / sizeof(u16))
  594. /*
  595. * This structure holds all XPS maps for device. Maps are indexed by CPU.
  596. */
  597. struct xps_dev_maps {
  598. struct rcu_head rcu;
  599. struct xps_map __rcu *cpu_map[0];
  600. };
  601. #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
  602. (nr_cpu_ids * sizeof(struct xps_map *)))
  603. #endif /* CONFIG_XPS */
  604. #define TC_MAX_QUEUE 16
  605. #define TC_BITMASK 15
  606. /* HW offloaded queuing disciplines txq count and offset maps */
  607. struct netdev_tc_txq {
  608. u16 count;
  609. u16 offset;
  610. };
  611. /*
  612. * This structure defines the management hooks for network devices.
  613. * The following hooks can be defined; unless noted otherwise, they are
  614. * optional and can be filled with a null pointer.
  615. *
  616. * int (*ndo_init)(struct net_device *dev);
  617. * This function is called once when network device is registered.
  618. * The network device can use this to any late stage initializaton
  619. * or semantic validattion. It can fail with an error code which will
  620. * be propogated back to register_netdev
  621. *
  622. * void (*ndo_uninit)(struct net_device *dev);
  623. * This function is called when device is unregistered or when registration
  624. * fails. It is not called if init fails.
  625. *
  626. * int (*ndo_open)(struct net_device *dev);
  627. * This function is called when network device transistions to the up
  628. * state.
  629. *
  630. * int (*ndo_stop)(struct net_device *dev);
  631. * This function is called when network device transistions to the down
  632. * state.
  633. *
  634. * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  635. * struct net_device *dev);
  636. * Called when a packet needs to be transmitted.
  637. * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
  638. * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  639. * Required can not be NULL.
  640. *
  641. * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
  642. * Called to decide which queue to when device supports multiple
  643. * transmit queues.
  644. *
  645. * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  646. * This function is called to allow device receiver to make
  647. * changes to configuration when multicast or promiscious is enabled.
  648. *
  649. * void (*ndo_set_rx_mode)(struct net_device *dev);
  650. * This function is called device changes address list filtering.
  651. * If driver handles unicast address filtering, it should set
  652. * IFF_UNICAST_FLT to its priv_flags.
  653. *
  654. * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  655. * This function is called when the Media Access Control address
  656. * needs to be changed. If this interface is not defined, the
  657. * mac address can not be changed.
  658. *
  659. * int (*ndo_validate_addr)(struct net_device *dev);
  660. * Test if Media Access Control address is valid for the device.
  661. *
  662. * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
  663. * Called when a user request an ioctl which can't be handled by
  664. * the generic interface code. If not defined ioctl's return
  665. * not supported error code.
  666. *
  667. * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  668. * Used to set network devices bus interface parameters. This interface
  669. * is retained for legacy reason, new devices should use the bus
  670. * interface (PCI) for low level management.
  671. *
  672. * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  673. * Called when a user wants to change the Maximum Transfer Unit
  674. * of a device. If not defined, any request to change MTU will
  675. * will return an error.
  676. *
  677. * void (*ndo_tx_timeout)(struct net_device *dev);
  678. * Callback uses when the transmitter has not made any progress
  679. * for dev->watchdog ticks.
  680. *
  681. * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  682. * struct rtnl_link_stats64 *storage);
  683. * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  684. * Called when a user wants to get the network device usage
  685. * statistics. Drivers must do one of the following:
  686. * 1. Define @ndo_get_stats64 to fill in a zero-initialised
  687. * rtnl_link_stats64 structure passed by the caller.
  688. * 2. Define @ndo_get_stats to update a net_device_stats structure
  689. * (which should normally be dev->stats) and return a pointer to
  690. * it. The structure may be changed asynchronously only if each
  691. * field is written atomically.
  692. * 3. Update dev->stats asynchronously and atomically, and define
  693. * neither operation.
  694. *
  695. * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  696. * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  697. * this function is called when a VLAN id is registered.
  698. *
  699. * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
  700. * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  701. * this function is called when a VLAN id is unregistered.
  702. *
  703. * void (*ndo_poll_controller)(struct net_device *dev);
  704. *
  705. * SR-IOV management functions.
  706. * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  707. * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
  708. * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
  709. * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
  710. * int (*ndo_get_vf_config)(struct net_device *dev,
  711. * int vf, struct ifla_vf_info *ivf);
  712. * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  713. * struct nlattr *port[]);
  714. * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  715. * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
  716. * Called to setup 'tc' number of traffic classes in the net device. This
  717. * is always called from the stack with the rtnl lock held and netif tx
  718. * queues stopped. This allows the netdevice to perform queue management
  719. * safely.
  720. *
  721. * Fiber Channel over Ethernet (FCoE) offload functions.
  722. * int (*ndo_fcoe_enable)(struct net_device *dev);
  723. * Called when the FCoE protocol stack wants to start using LLD for FCoE
  724. * so the underlying device can perform whatever needed configuration or
  725. * initialization to support acceleration of FCoE traffic.
  726. *
  727. * int (*ndo_fcoe_disable)(struct net_device *dev);
  728. * Called when the FCoE protocol stack wants to stop using LLD for FCoE
  729. * so the underlying device can perform whatever needed clean-ups to
  730. * stop supporting acceleration of FCoE traffic.
  731. *
  732. * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  733. * struct scatterlist *sgl, unsigned int sgc);
  734. * Called when the FCoE Initiator wants to initialize an I/O that
  735. * is a possible candidate for Direct Data Placement (DDP). The LLD can
  736. * perform necessary setup and returns 1 to indicate the device is set up
  737. * successfully to perform DDP on this I/O, otherwise this returns 0.
  738. *
  739. * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
  740. * Called when the FCoE Initiator/Target is done with the DDPed I/O as
  741. * indicated by the FC exchange id 'xid', so the underlying device can
  742. * clean up and reuse resources for later DDP requests.
  743. *
  744. * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  745. * struct scatterlist *sgl, unsigned int sgc);
  746. * Called when the FCoE Target wants to initialize an I/O that
  747. * is a possible candidate for Direct Data Placement (DDP). The LLD can
  748. * perform necessary setup and returns 1 to indicate the device is set up
  749. * successfully to perform DDP on this I/O, otherwise this returns 0.
  750. *
  751. * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
  752. * Called when the underlying device wants to override default World Wide
  753. * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
  754. * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
  755. * protocol stack to use.
  756. *
  757. * RFS acceleration.
  758. * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
  759. * u16 rxq_index, u32 flow_id);
  760. * Set hardware filter for RFS. rxq_index is the target queue index;
  761. * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
  762. * Return the filter ID on success, or a negative error code.
  763. *
  764. * Slave management functions (for bridge, bonding, etc). User should
  765. * call netdev_set_master() to set dev->master properly.
  766. * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
  767. * Called to make another netdev an underling.
  768. *
  769. * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
  770. * Called to release previously enslaved netdev.
  771. *
  772. * Feature/offload setting functions.
  773. * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  774. * netdev_features_t features);
  775. * Adjusts the requested feature flags according to device-specific
  776. * constraints, and returns the resulting flags. Must not modify
  777. * the device state.
  778. *
  779. * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  780. * Called to update device configuration to new features. Passed
  781. * feature set might be less than what was returned by ndo_fix_features()).
  782. * Must return >0 or -errno if it changed dev->features itself.
  783. *
  784. */
  785. struct net_device_ops {
  786. int (*ndo_init)(struct net_device *dev);
  787. void (*ndo_uninit)(struct net_device *dev);
  788. int (*ndo_open)(struct net_device *dev);
  789. int (*ndo_stop)(struct net_device *dev);
  790. netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
  791. struct net_device *dev);
  792. u16 (*ndo_select_queue)(struct net_device *dev,
  793. struct sk_buff *skb);
  794. void (*ndo_change_rx_flags)(struct net_device *dev,
  795. int flags);
  796. void (*ndo_set_rx_mode)(struct net_device *dev);
  797. int (*ndo_set_mac_address)(struct net_device *dev,
  798. void *addr);
  799. int (*ndo_validate_addr)(struct net_device *dev);
  800. int (*ndo_do_ioctl)(struct net_device *dev,
  801. struct ifreq *ifr, int cmd);
  802. int (*ndo_set_config)(struct net_device *dev,
  803. struct ifmap *map);
  804. int (*ndo_change_mtu)(struct net_device *dev,
  805. int new_mtu);
  806. int (*ndo_neigh_setup)(struct net_device *dev,
  807. struct neigh_parms *);
  808. void (*ndo_tx_timeout) (struct net_device *dev);
  809. struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  810. struct rtnl_link_stats64 *storage);
  811. struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  812. void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
  813. unsigned short vid);
  814. void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
  815. unsigned short vid);
  816. #ifdef CONFIG_NET_POLL_CONTROLLER
  817. void (*ndo_poll_controller)(struct net_device *dev);
  818. int (*ndo_netpoll_setup)(struct net_device *dev,
  819. struct netpoll_info *info);
  820. void (*ndo_netpoll_cleanup)(struct net_device *dev);
  821. #endif
  822. int (*ndo_set_vf_mac)(struct net_device *dev,
  823. int queue, u8 *mac);
  824. int (*ndo_set_vf_vlan)(struct net_device *dev,
  825. int queue, u16 vlan, u8 qos);
  826. int (*ndo_set_vf_tx_rate)(struct net_device *dev,
  827. int vf, int rate);
  828. int (*ndo_set_vf_spoofchk)(struct net_device *dev,
  829. int vf, bool setting);
  830. int (*ndo_get_vf_config)(struct net_device *dev,
  831. int vf,
  832. struct ifla_vf_info *ivf);
  833. int (*ndo_set_vf_port)(struct net_device *dev,
  834. int vf,
  835. struct nlattr *port[]);
  836. int (*ndo_get_vf_port)(struct net_device *dev,
  837. int vf, struct sk_buff *skb);
  838. int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
  839. #if IS_ENABLED(CONFIG_FCOE)
  840. int (*ndo_fcoe_enable)(struct net_device *dev);
  841. int (*ndo_fcoe_disable)(struct net_device *dev);
  842. int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
  843. u16 xid,
  844. struct scatterlist *sgl,
  845. unsigned int sgc);
  846. int (*ndo_fcoe_ddp_done)(struct net_device *dev,
  847. u16 xid);
  848. int (*ndo_fcoe_ddp_target)(struct net_device *dev,
  849. u16 xid,
  850. struct scatterlist *sgl,
  851. unsigned int sgc);
  852. #endif
  853. #if IS_ENABLED(CONFIG_LIBFCOE)
  854. #define NETDEV_FCOE_WWNN 0
  855. #define NETDEV_FCOE_WWPN 1
  856. int (*ndo_fcoe_get_wwn)(struct net_device *dev,
  857. u64 *wwn, int type);
  858. #endif
  859. #ifdef CONFIG_RFS_ACCEL
  860. int (*ndo_rx_flow_steer)(struct net_device *dev,
  861. const struct sk_buff *skb,
  862. u16 rxq_index,
  863. u32 flow_id);
  864. #endif
  865. int (*ndo_add_slave)(struct net_device *dev,
  866. struct net_device *slave_dev);
  867. int (*ndo_del_slave)(struct net_device *dev,
  868. struct net_device *slave_dev);
  869. netdev_features_t (*ndo_fix_features)(struct net_device *dev,
  870. netdev_features_t features);
  871. int (*ndo_set_features)(struct net_device *dev,
  872. netdev_features_t features);
  873. };
  874. /*
  875. * The DEVICE structure.
  876. * Actually, this whole structure is a big mistake. It mixes I/O
  877. * data with strictly "high-level" data, and it has to know about
  878. * almost every data structure used in the INET module.
  879. *
  880. * FIXME: cleanup struct net_device such that network protocol info
  881. * moves out.
  882. */
  883. struct net_device {
  884. /*
  885. * This is the first field of the "visible" part of this structure
  886. * (i.e. as seen by users in the "Space.c" file). It is the name
  887. * of the interface.
  888. */
  889. char name[IFNAMSIZ];
  890. struct pm_qos_request pm_qos_req;
  891. /* device name hash chain */
  892. struct hlist_node name_hlist;
  893. /* snmp alias */
  894. char *ifalias;
  895. /*
  896. * I/O specific fields
  897. * FIXME: Merge these and struct ifmap into one
  898. */
  899. unsigned long mem_end; /* shared mem end */
  900. unsigned long mem_start; /* shared mem start */
  901. unsigned long base_addr; /* device I/O address */
  902. unsigned int irq; /* device IRQ number */
  903. /*
  904. * Some hardware also needs these fields, but they are not
  905. * part of the usual set specified in Space.c.
  906. */
  907. unsigned long state;
  908. struct list_head dev_list;
  909. struct list_head napi_list;
  910. struct list_head unreg_list;
  911. /* currently active device features */
  912. netdev_features_t features;
  913. /* user-changeable features */
  914. netdev_features_t hw_features;
  915. /* user-requested features */
  916. netdev_features_t wanted_features;
  917. /* mask of features inheritable by VLAN devices */
  918. netdev_features_t vlan_features;
  919. /* Interface index. Unique device identifier */
  920. int ifindex;
  921. int iflink;
  922. struct net_device_stats stats;
  923. atomic_long_t rx_dropped; /* dropped packets by core network
  924. * Do not use this in drivers.
  925. */
  926. #ifdef CONFIG_WIRELESS_EXT
  927. /* List of functions to handle Wireless Extensions (instead of ioctl).
  928. * See <net/iw_handler.h> for details. Jean II */
  929. const struct iw_handler_def * wireless_handlers;
  930. /* Instance data managed by the core of Wireless Extensions. */
  931. struct iw_public_data * wireless_data;
  932. #endif
  933. /* Management operations */
  934. const struct net_device_ops *netdev_ops;
  935. const struct ethtool_ops *ethtool_ops;
  936. /* Hardware header description */
  937. const struct header_ops *header_ops;
  938. unsigned int flags; /* interface flags (a la BSD) */
  939. unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
  940. unsigned short gflags;
  941. unsigned short padded; /* How much padding added by alloc_netdev() */
  942. unsigned char operstate; /* RFC2863 operstate */
  943. unsigned char link_mode; /* mapping policy to operstate */
  944. unsigned char if_port; /* Selectable AUI, TP,..*/
  945. unsigned char dma; /* DMA channel */
  946. unsigned int mtu; /* interface MTU value */
  947. unsigned short type; /* interface hardware type */
  948. unsigned short hard_header_len; /* hardware hdr length */
  949. /* extra head- and tailroom the hardware may need, but not in all cases
  950. * can this be guaranteed, especially tailroom. Some cases also use
  951. * LL_MAX_HEADER instead to allocate the skb.
  952. */
  953. unsigned short needed_headroom;
  954. unsigned short needed_tailroom;
  955. /* Interface address info. */
  956. unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
  957. unsigned char addr_assign_type; /* hw address assignment type */
  958. unsigned char addr_len; /* hardware address length */
  959. unsigned short dev_id; /* for shared network cards */
  960. spinlock_t addr_list_lock;
  961. struct netdev_hw_addr_list uc; /* Unicast mac addresses */
  962. struct netdev_hw_addr_list mc; /* Multicast mac addresses */
  963. bool uc_promisc;
  964. unsigned int promiscuity;
  965. unsigned int allmulti;
  966. /* Protocol specific pointers */
  967. #if IS_ENABLED(CONFIG_VLAN_8021Q)
  968. struct vlan_group __rcu *vlgrp; /* VLAN group */
  969. #endif
  970. #if IS_ENABLED(CONFIG_NET_DSA)
  971. struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
  972. #endif
  973. void *atalk_ptr; /* AppleTalk link */
  974. struct in_device __rcu *ip_ptr; /* IPv4 specific data */
  975. struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
  976. struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
  977. void *ec_ptr; /* Econet specific data */
  978. void *ax25_ptr; /* AX.25 specific data */
  979. struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
  980. assign before registering */
  981. /*
  982. * Cache lines mostly used on receive path (including eth_type_trans())
  983. */
  984. unsigned long last_rx; /* Time of last Rx
  985. * This should not be set in
  986. * drivers, unless really needed,
  987. * because network stack (bonding)
  988. * use it if/when necessary, to
  989. * avoid dirtying this cache line.
  990. */
  991. struct net_device *master; /* Pointer to master device of a group,
  992. * which this device is member of.
  993. */
  994. /* Interface address info used in eth_type_trans() */
  995. unsigned char *dev_addr; /* hw address, (before bcast
  996. because most packets are
  997. unicast) */
  998. struct netdev_hw_addr_list dev_addrs; /* list of device
  999. hw addresses */
  1000. unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
  1001. #ifdef CONFIG_SYSFS
  1002. struct kset *queues_kset;
  1003. #endif
  1004. #ifdef CONFIG_RPS
  1005. struct netdev_rx_queue *_rx;
  1006. /* Number of RX queues allocated at register_netdev() time */
  1007. unsigned int num_rx_queues;
  1008. /* Number of RX queues currently active in device */
  1009. unsigned int real_num_rx_queues;
  1010. #ifdef CONFIG_RFS_ACCEL
  1011. /* CPU reverse-mapping for RX completion interrupts, indexed
  1012. * by RX queue number. Assigned by driver. This must only be
  1013. * set if the ndo_rx_flow_steer operation is defined. */
  1014. struct cpu_rmap *rx_cpu_rmap;
  1015. #endif
  1016. #endif
  1017. rx_handler_func_t __rcu *rx_handler;
  1018. void __rcu *rx_handler_data;
  1019. struct netdev_queue __rcu *ingress_queue;
  1020. /*
  1021. * Cache lines mostly used on transmit path
  1022. */
  1023. struct netdev_queue *_tx ____cacheline_aligned_in_smp;
  1024. /* Number of TX queues allocated at alloc_netdev_mq() time */
  1025. unsigned int num_tx_queues;
  1026. /* Number of TX queues currently active in device */
  1027. unsigned int real_num_tx_queues;
  1028. /* root qdisc from userspace point of view */
  1029. struct Qdisc *qdisc;
  1030. unsigned long tx_queue_len; /* Max frames per queue allowed */
  1031. spinlock_t tx_global_lock;
  1032. #ifdef CONFIG_XPS
  1033. struct xps_dev_maps __rcu *xps_maps;
  1034. #endif
  1035. /* These may be needed for future network-power-down code. */
  1036. /*
  1037. * trans_start here is expensive for high speed devices on SMP,
  1038. * please use netdev_queue->trans_start instead.
  1039. */
  1040. unsigned long trans_start; /* Time (in jiffies) of last Tx */
  1041. int watchdog_timeo; /* used by dev_watchdog() */
  1042. struct timer_list watchdog_timer;
  1043. /* Number of references to this device */
  1044. int __percpu *pcpu_refcnt;
  1045. /* delayed register/unregister */
  1046. struct list_head todo_list;
  1047. /* device index hash chain */
  1048. struct hlist_node index_hlist;
  1049. struct list_head link_watch_list;
  1050. /* register/unregister state machine */
  1051. enum { NETREG_UNINITIALIZED=0,
  1052. NETREG_REGISTERED, /* completed register_netdevice */
  1053. NETREG_UNREGISTERING, /* called unregister_netdevice */
  1054. NETREG_UNREGISTERED, /* completed unregister todo */
  1055. NETREG_RELEASED, /* called free_netdev */
  1056. NETREG_DUMMY, /* dummy device for NAPI poll */
  1057. } reg_state:8;
  1058. bool dismantle; /* device is going do be freed */
  1059. enum {
  1060. RTNL_LINK_INITIALIZED,
  1061. RTNL_LINK_INITIALIZING,
  1062. } rtnl_link_state:16;
  1063. /* Called from unregister, can be used to call free_netdev */
  1064. void (*destructor)(struct net_device *dev);
  1065. #ifdef CONFIG_NETPOLL
  1066. struct netpoll_info *npinfo;
  1067. #endif
  1068. #ifdef CONFIG_NET_NS
  1069. /* Network namespace this network device is inside */
  1070. struct net *nd_net;
  1071. #endif
  1072. /* mid-layer private */
  1073. union {
  1074. void *ml_priv;
  1075. struct pcpu_lstats __percpu *lstats; /* loopback stats */
  1076. struct pcpu_tstats __percpu *tstats; /* tunnel stats */
  1077. struct pcpu_dstats __percpu *dstats; /* dummy stats */
  1078. };
  1079. /* GARP */
  1080. struct garp_port __rcu *garp_port;
  1081. /* class/net/name entry */
  1082. struct device dev;
  1083. /* space for optional device, statistics, and wireless sysfs groups */
  1084. const struct attribute_group *sysfs_groups[4];
  1085. /* rtnetlink link ops */
  1086. const struct rtnl_link_ops *rtnl_link_ops;
  1087. /* for setting kernel sock attribute on TCP connection setup */
  1088. #define GSO_MAX_SIZE 65536
  1089. unsigned int gso_max_size;
  1090. #ifdef CONFIG_DCB
  1091. /* Data Center Bridging netlink ops */
  1092. const struct dcbnl_rtnl_ops *dcbnl_ops;
  1093. #endif
  1094. u8 num_tc;
  1095. struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
  1096. u8 prio_tc_map[TC_BITMASK + 1];
  1097. #if IS_ENABLED(CONFIG_FCOE)
  1098. /* max exchange id for FCoE LRO by ddp */
  1099. unsigned int fcoe_ddp_xid;
  1100. #endif
  1101. #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
  1102. struct netprio_map __rcu *priomap;
  1103. #endif
  1104. /* phy device may attach itself for hardware timestamping */
  1105. struct phy_device *phydev;
  1106. /* group the device belongs to */
  1107. int group;
  1108. };
  1109. #define to_net_dev(d) container_of(d, struct net_device, dev)
  1110. #define NETDEV_ALIGN 32
  1111. static inline
  1112. int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
  1113. {
  1114. return dev->prio_tc_map[prio & TC_BITMASK];
  1115. }
  1116. static inline
  1117. int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
  1118. {
  1119. if (tc >= dev->num_tc)
  1120. return -EINVAL;
  1121. dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
  1122. return 0;
  1123. }
  1124. static inline
  1125. void netdev_reset_tc(struct net_device *dev)
  1126. {
  1127. dev->num_tc = 0;
  1128. memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
  1129. memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
  1130. }
  1131. static inline
  1132. int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
  1133. {
  1134. if (tc >= dev->num_tc)
  1135. return -EINVAL;
  1136. dev->tc_to_txq[tc].count = count;
  1137. dev->tc_to_txq[tc].offset = offset;
  1138. return 0;
  1139. }
  1140. static inline
  1141. int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
  1142. {
  1143. if (num_tc > TC_MAX_QUEUE)
  1144. return -EINVAL;
  1145. dev->num_tc = num_tc;
  1146. return 0;
  1147. }
  1148. static inline
  1149. int netdev_get_num_tc(struct net_device *dev)
  1150. {
  1151. return dev->num_tc;
  1152. }
  1153. static inline
  1154. struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
  1155. unsigned int index)
  1156. {
  1157. return &dev->_tx[index];
  1158. }
  1159. static inline void netdev_for_each_tx_queue(struct net_device *dev,
  1160. void (*f)(struct net_device *,
  1161. struct netdev_queue *,
  1162. void *),
  1163. void *arg)
  1164. {
  1165. unsigned int i;
  1166. for (i = 0; i < dev->num_tx_queues; i++)
  1167. f(dev, &dev->_tx[i], arg);
  1168. }
  1169. /*
  1170. * Net namespace inlines
  1171. */
  1172. static inline
  1173. struct net *dev_net(const struct net_device *dev)
  1174. {
  1175. return read_pnet(&dev->nd_net);
  1176. }
  1177. static inline
  1178. void dev_net_set(struct net_device *dev, struct net *net)
  1179. {
  1180. #ifdef CONFIG_NET_NS
  1181. release_net(dev->nd_net);
  1182. dev->nd_net = hold_net(net);
  1183. #endif
  1184. }
  1185. static inline bool netdev_uses_dsa_tags(struct net_device *dev)
  1186. {
  1187. #ifdef CONFIG_NET_DSA_TAG_DSA
  1188. if (dev->dsa_ptr != NULL)
  1189. return dsa_uses_dsa_tags(dev->dsa_ptr);
  1190. #endif
  1191. return 0;
  1192. }
  1193. #ifndef CONFIG_NET_NS
  1194. static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
  1195. {
  1196. skb->dev = dev;
  1197. }
  1198. #else /* CONFIG_NET_NS */
  1199. void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
  1200. #endif
  1201. static inline bool netdev_uses_trailer_tags(struct net_device *dev)
  1202. {
  1203. #ifdef CONFIG_NET_DSA_TAG_TRAILER
  1204. if (dev->dsa_ptr != NULL)
  1205. return dsa_uses_trailer_tags(dev->dsa_ptr);
  1206. #endif
  1207. return 0;
  1208. }
  1209. /**
  1210. * netdev_priv - access network device private data
  1211. * @dev: network device
  1212. *
  1213. * Get network device private data
  1214. */
  1215. static inline void *netdev_priv(const struct net_device *dev)
  1216. {
  1217. return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
  1218. }
  1219. /* Set the sysfs physical device reference for the network logical device
  1220. * if set prior to registration will cause a symlink during initialization.
  1221. */
  1222. #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
  1223. /* Set the sysfs device type for the network logical device to allow
  1224. * fin grained indentification of different network device types. For
  1225. * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
  1226. */
  1227. #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
  1228. /**
  1229. * netif_napi_add - initialize a napi context
  1230. * @dev: network device
  1231. * @napi: napi context
  1232. * @poll: polling function
  1233. * @weight: default weight
  1234. *
  1235. * netif_napi_add() must be used to initialize a napi context prior to calling
  1236. * *any* of the other napi related functions.
  1237. */
  1238. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  1239. int (*poll)(struct napi_struct *, int), int weight);
  1240. /**
  1241. * netif_napi_del - remove a napi context
  1242. * @napi: napi context
  1243. *
  1244. * netif_napi_del() removes a napi context from the network device napi list
  1245. */
  1246. void netif_napi_del(struct napi_struct *napi);
  1247. struct napi_gro_cb {
  1248. /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
  1249. void *frag0;
  1250. /* Length of frag0. */
  1251. unsigned int frag0_len;
  1252. /* This indicates where we are processing relative to skb->data. */
  1253. int data_offset;
  1254. /* This is non-zero if the packet may be of the same flow. */
  1255. int same_flow;
  1256. /* This is non-zero if the packet cannot be merged with the new skb. */
  1257. int flush;
  1258. /* Number of segments aggregated. */
  1259. int count;
  1260. /* Free the skb? */
  1261. int free;
  1262. };
  1263. #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
  1264. struct packet_type {
  1265. __be16 type; /* This is really htons(ether_type). */
  1266. struct net_device *dev; /* NULL is wildcarded here */
  1267. int (*func) (struct sk_buff *,
  1268. struct net_device *,
  1269. struct packet_type *,
  1270. struct net_device *);
  1271. struct sk_buff *(*gso_segment)(struct sk_buff *skb,
  1272. netdev_features_t features);
  1273. int (*gso_send_check)(struct sk_buff *skb);
  1274. struct sk_buff **(*gro_receive)(struct sk_buff **head,
  1275. struct sk_buff *skb);
  1276. int (*gro_complete)(struct sk_buff *skb);
  1277. void *af_packet_priv;
  1278. struct list_head list;
  1279. };
  1280. #include <linux/notifier.h>
  1281. /* netdevice notifier chain. Please remember to update the rtnetlink
  1282. * notification exclusion list in rtnetlink_event() when adding new
  1283. * types.
  1284. */
  1285. #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
  1286. #define NETDEV_DOWN 0x0002
  1287. #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
  1288. detected a hardware crash and restarted
  1289. - we can use this eg to kick tcp sessions
  1290. once done */
  1291. #define NETDEV_CHANGE 0x0004 /* Notify device state change */
  1292. #define NETDEV_REGISTER 0x0005
  1293. #define NETDEV_UNREGISTER 0x0006
  1294. #define NETDEV_CHANGEMTU 0x0007
  1295. #define NETDEV_CHANGEADDR 0x0008
  1296. #define NETDEV_GOING_DOWN 0x0009
  1297. #define NETDEV_CHANGENAME 0x000A
  1298. #define NETDEV_FEAT_CHANGE 0x000B
  1299. #define NETDEV_BONDING_FAILOVER 0x000C
  1300. #define NETDEV_PRE_UP 0x000D
  1301. #define NETDEV_PRE_TYPE_CHANGE 0x000E
  1302. #define NETDEV_POST_TYPE_CHANGE 0x000F
  1303. #define NETDEV_POST_INIT 0x0010
  1304. #define NETDEV_UNREGISTER_BATCH 0x0011
  1305. #define NETDEV_RELEASE 0x0012
  1306. #define NETDEV_NOTIFY_PEERS 0x0013
  1307. #define NETDEV_JOIN 0x0014
  1308. extern int register_netdevice_notifier(struct notifier_block *nb);
  1309. extern int unregister_netdevice_notifier(struct notifier_block *nb);
  1310. extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  1311. extern rwlock_t dev_base_lock; /* Device list lock */
  1312. #define for_each_netdev(net, d) \
  1313. list_for_each_entry(d, &(net)->dev_base_head, dev_list)
  1314. #define for_each_netdev_reverse(net, d) \
  1315. list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
  1316. #define for_each_netdev_rcu(net, d) \
  1317. list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
  1318. #define for_each_netdev_safe(net, d, n) \
  1319. list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
  1320. #define for_each_netdev_continue(net, d) \
  1321. list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
  1322. #define for_each_netdev_continue_rcu(net, d) \
  1323. list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
  1324. #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
  1325. static inline struct net_device *next_net_device(struct net_device *dev)
  1326. {
  1327. struct list_head *lh;
  1328. struct net *net;
  1329. net = dev_net(dev);
  1330. lh = dev->dev_list.next;
  1331. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1332. }
  1333. static inline struct net_device *next_net_device_rcu(struct net_device *dev)
  1334. {
  1335. struct list_head *lh;
  1336. struct net *net;
  1337. net = dev_net(dev);
  1338. lh = rcu_dereference(list_next_rcu(&dev->dev_list));
  1339. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1340. }
  1341. static inline struct net_device *first_net_device(struct net *net)
  1342. {
  1343. return list_empty(&net->dev_base_head) ? NULL :
  1344. net_device_entry(net->dev_base_head.next);
  1345. }
  1346. static inline struct net_device *first_net_device_rcu(struct net *net)
  1347. {
  1348. struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
  1349. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1350. }
  1351. extern int netdev_boot_setup_check(struct net_device *dev);
  1352. extern unsigned long netdev_boot_base(const char *prefix, int unit);
  1353. extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
  1354. const char *hwaddr);
  1355. extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
  1356. extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
  1357. extern void dev_add_pack(struct packet_type *pt);
  1358. extern void dev_remove_pack(struct packet_type *pt);
  1359. extern void __dev_remove_pack(struct packet_type *pt);
  1360. extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
  1361. unsigned short mask);
  1362. extern struct net_device *dev_get_by_name(struct net *net, const char *name);
  1363. extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
  1364. extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
  1365. extern int dev_alloc_name(struct net_device *dev, const char *name);
  1366. extern int dev_open(struct net_device *dev);
  1367. extern int dev_close(struct net_device *dev);
  1368. extern void dev_disable_lro(struct net_device *dev);
  1369. extern int dev_queue_xmit(struct sk_buff *skb);
  1370. extern int register_netdevice(struct net_device *dev);
  1371. extern void unregister_netdevice_queue(struct net_device *dev,
  1372. struct list_head *head);
  1373. extern void unregister_netdevice_many(struct list_head *head);
  1374. static inline void unregister_netdevice(struct net_device *dev)
  1375. {
  1376. unregister_netdevice_queue(dev, NULL);
  1377. }
  1378. extern int netdev_refcnt_read(const struct net_device *dev);
  1379. extern void free_netdev(struct net_device *dev);
  1380. extern void synchronize_net(void);
  1381. extern int init_dummy_netdev(struct net_device *dev);
  1382. extern void netdev_resync_ops(struct net_device *dev);
  1383. extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
  1384. extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  1385. extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
  1386. extern int dev_restart(struct net_device *dev);
  1387. #ifdef CONFIG_NETPOLL_TRAP
  1388. extern int netpoll_trap(void);
  1389. #endif
  1390. extern int skb_gro_receive(struct sk_buff **head,
  1391. struct sk_buff *skb);
  1392. extern void skb_gro_reset_offset(struct sk_buff *skb);
  1393. static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
  1394. {
  1395. return NAPI_GRO_CB(skb)->data_offset;
  1396. }
  1397. static inline unsigned int skb_gro_len(const struct sk_buff *skb)
  1398. {
  1399. return skb->len - NAPI_GRO_CB(skb)->data_offset;
  1400. }
  1401. static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
  1402. {
  1403. NAPI_GRO_CB(skb)->data_offset += len;
  1404. }
  1405. static inline void *skb_gro_header_fast(struct sk_buff *skb,
  1406. unsigned int offset)
  1407. {
  1408. return NAPI_GRO_CB(skb)->frag0 + offset;
  1409. }
  1410. static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
  1411. {
  1412. return NAPI_GRO_CB(skb)->frag0_len < hlen;
  1413. }
  1414. static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
  1415. unsigned int offset)
  1416. {
  1417. if (!pskb_may_pull(skb, hlen))
  1418. return NULL;
  1419. NAPI_GRO_CB(skb)->frag0 = NULL;
  1420. NAPI_GRO_CB(skb)->frag0_len = 0;
  1421. return skb->data + offset;
  1422. }
  1423. static inline void *skb_gro_mac_header(struct sk_buff *skb)
  1424. {
  1425. return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
  1426. }
  1427. static inline void *skb_gro_network_header(struct sk_buff *skb)
  1428. {
  1429. return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
  1430. skb_network_offset(skb);
  1431. }
  1432. static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
  1433. unsigned short type,
  1434. const void *daddr, const void *saddr,
  1435. unsigned len)
  1436. {
  1437. if (!dev->header_ops || !dev->header_ops->create)
  1438. return 0;
  1439. return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
  1440. }
  1441. static inline int dev_parse_header(const struct sk_buff *skb,
  1442. unsigned char *haddr)
  1443. {
  1444. const struct net_device *dev = skb->dev;
  1445. if (!dev->header_ops || !dev->header_ops->parse)
  1446. return 0;
  1447. return dev->header_ops->parse(skb, haddr);
  1448. }
  1449. typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  1450. extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
  1451. static inline int unregister_gifconf(unsigned int family)
  1452. {
  1453. return register_gifconf(family, NULL);
  1454. }
  1455. /*
  1456. * Incoming packets are placed on per-cpu queues
  1457. */
  1458. struct softnet_data {
  1459. struct Qdisc *output_queue;
  1460. struct Qdisc **output_queue_tailp;
  1461. struct list_head poll_list;
  1462. struct sk_buff *completion_queue;
  1463. struct sk_buff_head process_queue;
  1464. /* stats */
  1465. unsigned int processed;
  1466. unsigned int time_squeeze;
  1467. unsigned int cpu_collision;
  1468. unsigned int received_rps;
  1469. #ifdef CONFIG_RPS
  1470. struct softnet_data *rps_ipi_list;
  1471. /* Elements below can be accessed between CPUs for RPS */
  1472. struct call_single_data csd ____cacheline_aligned_in_smp;
  1473. struct softnet_data *rps_ipi_next;
  1474. unsigned int cpu;
  1475. unsigned int input_queue_head;
  1476. unsigned int input_queue_tail;
  1477. #endif
  1478. unsigned dropped;
  1479. struct sk_buff_head input_pkt_queue;
  1480. struct napi_struct backlog;
  1481. };
  1482. static inline void input_queue_head_incr(struct softnet_data *sd)
  1483. {
  1484. #ifdef CONFIG_RPS
  1485. sd->input_queue_head++;
  1486. #endif
  1487. }
  1488. static inline void input_queue_tail_incr_save(struct softnet_data *sd,
  1489. unsigned int *qtail)
  1490. {
  1491. #ifdef CONFIG_RPS
  1492. *qtail = ++sd->input_queue_tail;
  1493. #endif
  1494. }
  1495. DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
  1496. extern void __netif_schedule(struct Qdisc *q);
  1497. static inline void netif_schedule_queue(struct netdev_queue *txq)
  1498. {
  1499. if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
  1500. __netif_schedule(txq->qdisc);
  1501. }
  1502. static inline void netif_tx_schedule_all(struct net_device *dev)
  1503. {
  1504. unsigned int i;
  1505. for (i = 0; i < dev->num_tx_queues; i++)
  1506. netif_schedule_queue(netdev_get_tx_queue(dev, i));
  1507. }
  1508. static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
  1509. {
  1510. clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  1511. }
  1512. /**
  1513. * netif_start_queue - allow transmit
  1514. * @dev: network device
  1515. *
  1516. * Allow upper layers to call the device hard_start_xmit routine.
  1517. */
  1518. static inline void netif_start_queue(struct net_device *dev)
  1519. {
  1520. netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
  1521. }
  1522. static inline void netif_tx_start_all_queues(struct net_device *dev)
  1523. {
  1524. unsigned int i;
  1525. for (i = 0; i < dev->num_tx_queues; i++) {
  1526. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1527. netif_tx_start_queue(txq);
  1528. }
  1529. }
  1530. static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  1531. {
  1532. #ifdef CONFIG_NETPOLL_TRAP
  1533. if (netpoll_trap()) {
  1534. netif_tx_start_queue(dev_queue);
  1535. return;
  1536. }
  1537. #endif
  1538. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
  1539. __netif_schedule(dev_queue->qdisc);
  1540. }
  1541. /**
  1542. * netif_wake_queue - restart transmit
  1543. * @dev: network device
  1544. *
  1545. * Allow upper layers to call the device hard_start_xmit routine.
  1546. * Used for flow control when transmit resources are available.
  1547. */
  1548. static inline void netif_wake_queue(struct net_device *dev)
  1549. {
  1550. netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
  1551. }
  1552. static inline void netif_tx_wake_all_queues(struct net_device *dev)
  1553. {
  1554. unsigned int i;
  1555. for (i = 0; i < dev->num_tx_queues; i++) {
  1556. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1557. netif_tx_wake_queue(txq);
  1558. }
  1559. }
  1560. static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
  1561. {
  1562. if (WARN_ON(!dev_queue)) {
  1563. pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
  1564. return;
  1565. }
  1566. set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  1567. }
  1568. /**
  1569. * netif_stop_queue - stop transmitted packets
  1570. * @dev: network device
  1571. *
  1572. * Stop upper layers calling the device hard_start_xmit routine.
  1573. * Used for flow control when transmit resources are unavailable.
  1574. */
  1575. static inline void netif_stop_queue(struct net_device *dev)
  1576. {
  1577. netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
  1578. }
  1579. static inline void netif_tx_stop_all_queues(struct net_device *dev)
  1580. {
  1581. unsigned int i;
  1582. for (i = 0; i < dev->num_tx_queues; i++) {
  1583. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1584. netif_tx_stop_queue(txq);
  1585. }
  1586. }
  1587. static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  1588. {
  1589. return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  1590. }
  1591. /**
  1592. * netif_queue_stopped - test if transmit queue is flowblocked
  1593. * @dev: network device
  1594. *
  1595. * Test if transmit queue on device is currently unable to send.
  1596. */
  1597. static inline int netif_queue_stopped(const struct net_device *dev)
  1598. {
  1599. return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
  1600. }
  1601. static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
  1602. {
  1603. return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
  1604. }
  1605. /**
  1606. * netif_running - test if up
  1607. * @dev: network device
  1608. *
  1609. * Test if the device has been brought up.
  1610. */
  1611. static inline int netif_running(const struct net_device *dev)
  1612. {
  1613. return test_bit(__LINK_STATE_START, &dev->state);
  1614. }
  1615. /*
  1616. * Routines to manage the subqueues on a device. We only need start
  1617. * stop, and a check if it's stopped. All other device management is
  1618. * done at the overall netdevice level.
  1619. * Also test the device if we're multiqueue.
  1620. */
  1621. /**
  1622. * netif_start_subqueue - allow sending packets on subqueue
  1623. * @dev: network device
  1624. * @queue_index: sub queue index
  1625. *
  1626. * Start individual transmit queue of a device with multiple transmit queues.
  1627. */
  1628. static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  1629. {
  1630. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1631. netif_tx_start_queue(txq);
  1632. }
  1633. /**
  1634. * netif_stop_subqueue - stop sending packets on subqueue
  1635. * @dev: network device
  1636. * @queue_index: sub queue index
  1637. *
  1638. * Stop individual transmit queue of a device with multiple transmit queues.
  1639. */
  1640. static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  1641. {
  1642. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1643. #ifdef CONFIG_NETPOLL_TRAP
  1644. if (netpoll_trap())
  1645. return;
  1646. #endif
  1647. netif_tx_stop_queue(txq);
  1648. }
  1649. /**
  1650. * netif_subqueue_stopped - test status of subqueue
  1651. * @dev: network device
  1652. * @queue_index: sub queue index
  1653. *
  1654. * Check individual transmit queue of a device with multiple transmit queues.
  1655. */
  1656. static inline int __netif_subqueue_stopped(const struct net_device *dev,
  1657. u16 queue_index)
  1658. {
  1659. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1660. return netif_tx_queue_stopped(txq);
  1661. }
  1662. static inline int netif_subqueue_stopped(const struct net_device *dev,
  1663. struct sk_buff *skb)
  1664. {
  1665. return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
  1666. }
  1667. /**
  1668. * netif_wake_subqueue - allow sending packets on subqueue
  1669. * @dev: network device
  1670. * @queue_index: sub queue index
  1671. *
  1672. * Resume individual transmit queue of a device with multiple transmit queues.
  1673. */
  1674. static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  1675. {
  1676. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1677. #ifdef CONFIG_NETPOLL_TRAP
  1678. if (netpoll_trap())
  1679. return;
  1680. #endif
  1681. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
  1682. __netif_schedule(txq->qdisc);
  1683. }
  1684. /*
  1685. * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
  1686. * as a distribution range limit for the returned value.
  1687. */
  1688. static inline u16 skb_tx_hash(const struct net_device *dev,
  1689. const struct sk_buff *skb)
  1690. {
  1691. return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
  1692. }
  1693. /**
  1694. * netif_is_multiqueue - test if device has multiple transmit queues
  1695. * @dev: network device
  1696. *
  1697. * Check if device has multiple transmit queues
  1698. */
  1699. static inline int netif_is_multiqueue(const struct net_device *dev)
  1700. {
  1701. return dev->num_tx_queues > 1;
  1702. }
  1703. extern int netif_set_real_num_tx_queues(struct net_device *dev,
  1704. unsigned int txq);
  1705. #ifdef CONFIG_RPS
  1706. extern int netif_set_real_num_rx_queues(struct net_device *dev,
  1707. unsigned int rxq);
  1708. #else
  1709. static inline int netif_set_real_num_rx_queues(struct net_device *dev,
  1710. unsigned int rxq)
  1711. {
  1712. return 0;
  1713. }
  1714. #endif
  1715. static inline int netif_copy_real_num_queues(struct net_device *to_dev,
  1716. const struct net_device *from_dev)
  1717. {
  1718. netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
  1719. #ifdef CONFIG_RPS
  1720. return netif_set_real_num_rx_queues(to_dev,
  1721. from_dev->real_num_rx_queues);
  1722. #else
  1723. return 0;
  1724. #endif
  1725. }
  1726. /* Use this variant when it is known for sure that it
  1727. * is executing from hardware interrupt context or with hardware interrupts
  1728. * disabled.
  1729. */
  1730. extern void dev_kfree_skb_irq(struct sk_buff *skb);
  1731. /* Use this variant in places where it could be invoked
  1732. * from either hardware interrupt or other context, with hardware interrupts
  1733. * either disabled or enabled.
  1734. */
  1735. extern void dev_kfree_skb_any(struct sk_buff *skb);
  1736. extern int netif_rx(struct sk_buff *skb);
  1737. extern int netif_rx_ni(struct sk_buff *skb);
  1738. extern int netif_receive_skb(struct sk_buff *skb);
  1739. extern gro_result_t dev_gro_receive(struct napi_struct *napi,
  1740. struct sk_buff *skb);
  1741. extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
  1742. extern gro_result_t napi_gro_receive(struct napi_struct *napi,
  1743. struct sk_buff *skb);
  1744. extern void napi_gro_flush(struct napi_struct *napi);
  1745. extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
  1746. extern gro_result_t napi_frags_finish(struct napi_struct *napi,
  1747. struct sk_buff *skb,
  1748. gro_result_t ret);
  1749. extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
  1750. extern gro_result_t napi_gro_frags(struct napi_struct *napi);
  1751. static inline void napi_free_frags(struct napi_struct *napi)
  1752. {
  1753. kfree_skb(napi->skb);
  1754. napi->skb = NULL;
  1755. }
  1756. extern int netdev_rx_handler_register(struct net_device *dev,
  1757. rx_handler_func_t *rx_handler,
  1758. void *rx_handler_data);
  1759. extern void netdev_rx_handler_unregister(struct net_device *dev);
  1760. extern int dev_valid_name(const char *name);
  1761. extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
  1762. extern int dev_ethtool(struct net *net, struct ifreq *);
  1763. extern unsigned dev_get_flags(const struct net_device *);
  1764. extern int __dev_change_flags(struct net_device *, unsigned int flags);
  1765. extern int dev_change_flags(struct net_device *, unsigned);
  1766. extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
  1767. extern int dev_change_name(struct net_device *, const char *);
  1768. extern int dev_set_alias(struct net_device *, const char *, size_t);
  1769. extern int dev_change_net_namespace(struct net_device *,
  1770. struct net *, const char *);
  1771. extern int dev_set_mtu(struct net_device *, int);
  1772. extern void dev_set_group(struct net_device *, int);
  1773. extern int dev_set_mac_address(struct net_device *,
  1774. struct sockaddr *);
  1775. extern int dev_hard_start_xmit(struct sk_buff *skb,
  1776. struct net_device *dev,
  1777. struct netdev_queue *txq);
  1778. extern int dev_forward_skb(struct net_device *dev,
  1779. struct sk_buff *skb);
  1780. extern int netdev_budget;
  1781. /* Called by rtnetlink.c:rtnl_unlock() */
  1782. extern void netdev_run_todo(void);
  1783. /**
  1784. * dev_put - release reference to device
  1785. * @dev: network device
  1786. *
  1787. * Release reference to device to allow it to be freed.
  1788. */
  1789. static inline void dev_put(struct net_device *dev)
  1790. {
  1791. irqsafe_cpu_dec(*dev->pcpu_refcnt);
  1792. }
  1793. /**
  1794. * dev_hold - get reference to device
  1795. * @dev: network device
  1796. *
  1797. * Hold reference to device to keep it from being freed.
  1798. */
  1799. static inline void dev_hold(struct net_device *dev)
  1800. {
  1801. irqsafe_cpu_inc(*dev->pcpu_refcnt);
  1802. }
  1803. /* Carrier loss detection, dial on demand. The functions netif_carrier_on
  1804. * and _off may be called from IRQ context, but it is caller
  1805. * who is responsible for serialization of these calls.
  1806. *
  1807. * The name carrier is inappropriate, these functions should really be
  1808. * called netif_lowerlayer_*() because they represent the state of any
  1809. * kind of lower layer not just hardware media.
  1810. */
  1811. extern void linkwatch_fire_event(struct net_device *dev);
  1812. extern void linkwatch_forget_dev(struct net_device *dev);
  1813. /**
  1814. * netif_carrier_ok - test if carrier present
  1815. * @dev: network device
  1816. *
  1817. * Check if carrier is present on device
  1818. */
  1819. static inline int netif_carrier_ok(const struct net_device *dev)
  1820. {
  1821. return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
  1822. }
  1823. extern unsigned long dev_trans_start(struct net_device *dev);
  1824. extern void __netdev_watchdog_up(struct net_device *dev);
  1825. extern void netif_carrier_on(struct net_device *dev);
  1826. extern void netif_carrier_off(struct net_device *dev);
  1827. extern void netif_notify_peers(struct net_device *dev);
  1828. /**
  1829. * netif_dormant_on - mark device as dormant.
  1830. * @dev: network device
  1831. *
  1832. * Mark device as dormant (as per RFC2863).
  1833. *
  1834. * The dormant state indicates that the relevant interface is not
  1835. * actually in a condition to pass packets (i.e., it is not 'up') but is
  1836. * in a "pending" state, waiting for some external event. For "on-
  1837. * demand" interfaces, this new state identifies the situation where the
  1838. * interface is waiting for events to place it in the up state.
  1839. *
  1840. */
  1841. static inline void netif_dormant_on(struct net_device *dev)
  1842. {
  1843. if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
  1844. linkwatch_fire_event(dev);
  1845. }
  1846. /**
  1847. * netif_dormant_off - set device as not dormant.
  1848. * @dev: network device
  1849. *
  1850. * Device is not in dormant state.
  1851. */
  1852. static inline void netif_dormant_off(struct net_device *dev)
  1853. {
  1854. if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
  1855. linkwatch_fire_event(dev);
  1856. }
  1857. /**
  1858. * netif_dormant - test if carrier present
  1859. * @dev: network device
  1860. *
  1861. * Check if carrier is present on device
  1862. */
  1863. static inline int netif_dormant(const struct net_device *dev)
  1864. {
  1865. return test_bit(__LINK_STATE_DORMANT, &dev->state);
  1866. }
  1867. /**
  1868. * netif_oper_up - test if device is operational
  1869. * @dev: network device
  1870. *
  1871. * Check if carrier is operational
  1872. */
  1873. static inline int netif_oper_up(const struct net_device *dev)
  1874. {
  1875. return (dev->operstate == IF_OPER_UP ||
  1876. dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
  1877. }
  1878. /**
  1879. * netif_device_present - is device available or removed
  1880. * @dev: network device
  1881. *
  1882. * Check if device has not been removed from system.
  1883. */
  1884. static inline int netif_device_present(struct net_device *dev)
  1885. {
  1886. return test_bit(__LINK_STATE_PRESENT, &dev->state);
  1887. }
  1888. extern void netif_device_detach(struct net_device *dev);
  1889. extern void netif_device_attach(struct net_device *dev);
  1890. /*
  1891. * Network interface message level settings
  1892. */
  1893. enum {
  1894. NETIF_MSG_DRV = 0x0001,
  1895. NETIF_MSG_PROBE = 0x0002,
  1896. NETIF_MSG_LINK = 0x0004,
  1897. NETIF_MSG_TIMER = 0x0008,
  1898. NETIF_MSG_IFDOWN = 0x0010,
  1899. NETIF_MSG_IFUP = 0x0020,
  1900. NETIF_MSG_RX_ERR = 0x0040,
  1901. NETIF_MSG_TX_ERR = 0x0080,
  1902. NETIF_MSG_TX_QUEUED = 0x0100,
  1903. NETIF_MSG_INTR = 0x0200,
  1904. NETIF_MSG_TX_DONE = 0x0400,
  1905. NETIF_MSG_RX_STATUS = 0x0800,
  1906. NETIF_MSG_PKTDATA = 0x1000,
  1907. NETIF_MSG_HW = 0x2000,
  1908. NETIF_MSG_WOL = 0x4000,
  1909. };
  1910. #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
  1911. #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
  1912. #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
  1913. #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
  1914. #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
  1915. #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
  1916. #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
  1917. #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
  1918. #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
  1919. #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
  1920. #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
  1921. #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
  1922. #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
  1923. #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
  1924. #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
  1925. static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
  1926. {
  1927. /* use default */
  1928. if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
  1929. return default_msg_enable_bits;
  1930. if (debug_value == 0) /* no output */
  1931. return 0;
  1932. /* set low N bits */
  1933. return (1 << debug_value) - 1;
  1934. }
  1935. static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
  1936. {
  1937. spin_lock(&txq->_xmit_lock);
  1938. txq->xmit_lock_owner = cpu;
  1939. }
  1940. static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  1941. {
  1942. spin_lock_bh(&txq->_xmit_lock);
  1943. txq->xmit_lock_owner = smp_processor_id();
  1944. }
  1945. static inline int __netif_tx_trylock(struct netdev_queue *txq)
  1946. {
  1947. int ok = spin_trylock(&txq->_xmit_lock);
  1948. if (likely(ok))
  1949. txq->xmit_lock_owner = smp_processor_id();
  1950. return ok;
  1951. }
  1952. static inline void __netif_tx_unlock(struct netdev_queue *txq)
  1953. {
  1954. txq->xmit_lock_owner = -1;
  1955. spin_unlock(&txq->_xmit_lock);
  1956. }
  1957. static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
  1958. {
  1959. txq->xmit_lock_owner = -1;
  1960. spin_unlock_bh(&txq->_xmit_lock);
  1961. }
  1962. static inline void txq_trans_update(struct netdev_queue *txq)
  1963. {
  1964. if (txq->xmit_lock_owner != -1)
  1965. txq->trans_start = jiffies;
  1966. }
  1967. /**
  1968. * netif_tx_lock - grab network device transmit lock
  1969. * @dev: network device
  1970. *
  1971. * Get network device transmit lock
  1972. */
  1973. static inline void netif_tx_lock(struct net_device *dev)
  1974. {
  1975. unsigned int i;
  1976. int cpu;
  1977. spin_lock(&dev->tx_global_lock);
  1978. cpu = smp_processor_id();
  1979. for (i = 0; i < dev->num_tx_queues; i++) {
  1980. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1981. /* We are the only thread of execution doing a
  1982. * freeze, but we have to grab the _xmit_lock in
  1983. * order to synchronize with threads which are in
  1984. * the ->hard_start_xmit() handler and already
  1985. * checked the frozen bit.
  1986. */
  1987. __netif_tx_lock(txq, cpu);
  1988. set_bit(__QUEUE_STATE_FROZEN, &txq->state);
  1989. __netif_tx_unlock(txq);
  1990. }
  1991. }
  1992. static inline void netif_tx_lock_bh(struct net_device *dev)
  1993. {
  1994. local_bh_disable();
  1995. netif_tx_lock(dev);
  1996. }
  1997. static inline void netif_tx_unlock(struct net_device *dev)
  1998. {
  1999. unsigned int i;
  2000. for (i = 0; i < dev->num_tx_queues; i++) {
  2001. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2002. /* No need to grab the _xmit_lock here. If the
  2003. * queue is not stopped for another reason, we
  2004. * force a schedule.
  2005. */
  2006. clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
  2007. netif_schedule_queue(txq);
  2008. }
  2009. spin_unlock(&dev->tx_global_lock);
  2010. }
  2011. static inline void netif_tx_unlock_bh(struct net_device *dev)
  2012. {
  2013. netif_tx_unlock(dev);
  2014. local_bh_enable();
  2015. }
  2016. #define HARD_TX_LOCK(dev, txq, cpu) { \
  2017. if ((dev->features & NETIF_F_LLTX) == 0) { \
  2018. __netif_tx_lock(txq, cpu); \
  2019. } \
  2020. }
  2021. #define HARD_TX_UNLOCK(dev, txq) { \
  2022. if ((dev->features & NETIF_F_LLTX) == 0) { \
  2023. __netif_tx_unlock(txq); \
  2024. } \
  2025. }
  2026. static inline void netif_tx_disable(struct net_device *dev)
  2027. {
  2028. unsigned int i;
  2029. int cpu;
  2030. local_bh_disable();
  2031. cpu = smp_processor_id();
  2032. for (i = 0; i < dev->num_tx_queues; i++) {
  2033. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2034. __netif_tx_lock(txq, cpu);
  2035. netif_tx_stop_queue(txq);
  2036. __netif_tx_unlock(txq);
  2037. }
  2038. local_bh_enable();
  2039. }
  2040. static inline void netif_addr_lock(struct net_device *dev)
  2041. {
  2042. spin_lock(&dev->addr_list_lock);
  2043. }
  2044. static inline void netif_addr_lock_bh(struct net_device *dev)
  2045. {
  2046. spin_lock_bh(&dev->addr_list_lock);
  2047. }
  2048. static inline void netif_addr_unlock(struct net_device *dev)
  2049. {
  2050. spin_unlock(&dev->addr_list_lock);
  2051. }
  2052. static inline void netif_addr_unlock_bh(struct net_device *dev)
  2053. {
  2054. spin_unlock_bh(&dev->addr_list_lock);
  2055. }
  2056. /*
  2057. * dev_addrs walker. Should be used only for read access. Call with
  2058. * rcu_read_lock held.
  2059. */
  2060. #define for_each_dev_addr(dev, ha) \
  2061. list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
  2062. /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  2063. extern void ether_setup(struct net_device *dev);
  2064. /* Support for loadable net-drivers */
  2065. extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
  2066. void (*setup)(struct net_device *),
  2067. unsigned int txqs, unsigned int rxqs);
  2068. #define alloc_netdev(sizeof_priv, name, setup) \
  2069. alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
  2070. #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
  2071. alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
  2072. extern int register_netdev(struct net_device *dev);
  2073. extern void unregister_netdev(struct net_device *dev);
  2074. /* General hardware address lists handling functions */
  2075. extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
  2076. struct netdev_hw_addr_list *from_list,
  2077. int addr_len, unsigned char addr_type);
  2078. extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
  2079. struct netdev_hw_addr_list *from_list,
  2080. int addr_len, unsigned char addr_type);
  2081. extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
  2082. struct netdev_hw_addr_list *from_list,
  2083. int addr_len);
  2084. extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
  2085. struct netdev_hw_addr_list *from_list,
  2086. int addr_len);
  2087. extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
  2088. extern void __hw_addr_init(struct netdev_hw_addr_list *list);
  2089. /* Functions used for device addresses handling */
  2090. extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
  2091. unsigned char addr_type);
  2092. extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
  2093. unsigned char addr_type);
  2094. extern int dev_addr_add_multiple(struct net_device *to_dev,
  2095. struct net_device *from_dev,
  2096. unsigned char addr_type);
  2097. extern int dev_addr_del_multiple(struct net_device *to_dev,
  2098. struct net_device *from_dev,
  2099. unsigned char addr_type);
  2100. extern void dev_addr_flush(struct net_device *dev);
  2101. extern int dev_addr_init(struct net_device *dev);
  2102. /* Functions used for unicast addresses handling */
  2103. extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
  2104. extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
  2105. extern int dev_uc_sync(struct net_device *to, struct net_device *from);
  2106. extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
  2107. extern void dev_uc_flush(struct net_device *dev);
  2108. extern void dev_uc_init(struct net_device *dev);
  2109. /* Functions used for multicast addresses handling */
  2110. extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
  2111. extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
  2112. extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
  2113. extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
  2114. extern int dev_mc_sync(struct net_device *to, struct net_device *from);
  2115. extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
  2116. extern void dev_mc_flush(struct net_device *dev);
  2117. extern void dev_mc_init(struct net_device *dev);
  2118. /* Functions used for secondary unicast and multicast support */
  2119. extern void dev_set_rx_mode(struct net_device *dev);
  2120. extern void __dev_set_rx_mode(struct net_device *dev);
  2121. extern int dev_set_promiscuity(struct net_device *dev, int inc);
  2122. extern int dev_set_allmulti(struct net_device *dev, int inc);
  2123. extern void netdev_state_change(struct net_device *dev);
  2124. extern int netdev_bonding_change(struct net_device *dev,
  2125. unsigned long event);
  2126. extern void netdev_features_change(struct net_device *dev);
  2127. /* Load a device via the kmod */
  2128. extern void dev_load(struct net *net, const char *name);
  2129. extern void dev_mcast_init(void);
  2130. extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  2131. struct rtnl_link_stats64 *storage);
  2132. extern int netdev_max_backlog;
  2133. extern int netdev_tstamp_prequeue;
  2134. extern int weight_p;
  2135. extern int bpf_jit_enable;
  2136. extern int netdev_set_master(struct net_device *dev, struct net_device *master);
  2137. extern int netdev_set_bond_master(struct net_device *dev,
  2138. struct net_device *master);
  2139. extern int skb_checksum_help(struct sk_buff *skb);
  2140. extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
  2141. netdev_features_t features);
  2142. #ifdef CONFIG_BUG
  2143. extern void netdev_rx_csum_fault(struct net_device *dev);
  2144. #else
  2145. static inline void netdev_rx_csum_fault(struct net_device *dev)
  2146. {
  2147. }
  2148. #endif
  2149. /* rx skb timestamps */
  2150. extern void net_enable_timestamp(void);
  2151. extern void net_disable_timestamp(void);
  2152. #ifdef CONFIG_PROC_FS
  2153. extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
  2154. extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
  2155. extern void dev_seq_stop(struct seq_file *seq, void *v);
  2156. #endif
  2157. extern int netdev_class_create_file(struct class_attribute *class_attr);
  2158. extern void netdev_class_remove_file(struct class_attribute *class_attr);
  2159. extern struct kobj_ns_type_operations net_ns_type_operations;
  2160. extern const char *netdev_drivername(const struct net_device *dev);
  2161. extern void linkwatch_run_queue(void);
  2162. static inline netdev_features_t netdev_get_wanted_features(
  2163. struct net_device *dev)
  2164. {
  2165. return (dev->features & ~dev->hw_features) | dev->wanted_features;
  2166. }
  2167. netdev_features_t netdev_increment_features(netdev_features_t all,
  2168. netdev_features_t one, netdev_features_t mask);
  2169. int __netdev_update_features(struct net_device *dev);
  2170. void netdev_update_features(struct net_device *dev);
  2171. void netdev_change_features(struct net_device *dev);
  2172. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  2173. struct net_device *dev);
  2174. netdev_features_t netif_skb_features(struct sk_buff *skb);
  2175. static inline int net_gso_ok(netdev_features_t features, int gso_type)
  2176. {
  2177. netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
  2178. /* check flags correspondence */
  2179. BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
  2180. BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
  2181. BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
  2182. BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
  2183. BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
  2184. BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
  2185. return (features & feature) == feature;
  2186. }
  2187. static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
  2188. {
  2189. return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
  2190. (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
  2191. }
  2192. static inline int netif_needs_gso(struct sk_buff *skb,
  2193. netdev_features_t features)
  2194. {
  2195. return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
  2196. unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
  2197. }
  2198. static inline void netif_set_gso_max_size(struct net_device *dev,
  2199. unsigned int size)
  2200. {
  2201. dev->gso_max_size = size;
  2202. }
  2203. static inline int netif_is_bond_slave(struct net_device *dev)
  2204. {
  2205. return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
  2206. }
  2207. extern struct pernet_operations __net_initdata loopback_net_ops;
  2208. /* Logging, debugging and troubleshooting/diagnostic helpers. */
  2209. /* netdev_printk helpers, similar to dev_printk */
  2210. static inline const char *netdev_name(const struct net_device *dev)
  2211. {
  2212. if (dev->reg_state != NETREG_REGISTERED)
  2213. return "(unregistered net_device)";
  2214. return dev->name;
  2215. }
  2216. extern int __netdev_printk(const char *level, const struct net_device *dev,
  2217. struct va_format *vaf);
  2218. extern __printf(3, 4)
  2219. int netdev_printk(const char *level, const struct net_device *dev,
  2220. const char *format, ...);
  2221. extern __printf(2, 3)
  2222. int netdev_emerg(const struct net_device *dev, const char *format, ...);
  2223. extern __printf(2, 3)
  2224. int netdev_alert(const struct net_device *dev, const char *format, ...);
  2225. extern __printf(2, 3)
  2226. int netdev_crit(const struct net_device *dev, const char *format, ...);
  2227. extern __printf(2, 3)
  2228. int netdev_err(const struct net_device *dev, const char *format, ...);
  2229. extern __printf(2, 3)
  2230. int netdev_warn(const struct net_device *dev, const char *format, ...);
  2231. extern __printf(2, 3)
  2232. int netdev_notice(const struct net_device *dev, const char *format, ...);
  2233. extern __printf(2, 3)
  2234. int netdev_info(const struct net_device *dev, const char *format, ...);
  2235. #define MODULE_ALIAS_NETDEV(device) \
  2236. MODULE_ALIAS("netdev-" device)
  2237. #if defined(DEBUG)
  2238. #define netdev_dbg(__dev, format, args...) \
  2239. netdev_printk(KERN_DEBUG, __dev, format, ##args)
  2240. #elif defined(CONFIG_DYNAMIC_DEBUG)
  2241. #define netdev_dbg(__dev, format, args...) \
  2242. do { \
  2243. dynamic_netdev_dbg(__dev, format, ##args); \
  2244. } while (0)
  2245. #else
  2246. #define netdev_dbg(__dev, format, args...) \
  2247. ({ \
  2248. if (0) \
  2249. netdev_printk(KERN_DEBUG, __dev, format, ##args); \
  2250. 0; \
  2251. })
  2252. #endif
  2253. #if defined(VERBOSE_DEBUG)
  2254. #define netdev_vdbg netdev_dbg
  2255. #else
  2256. #define netdev_vdbg(dev, format, args...) \
  2257. ({ \
  2258. if (0) \
  2259. netdev_printk(KERN_DEBUG, dev, format, ##args); \
  2260. 0; \
  2261. })
  2262. #endif
  2263. /*
  2264. * netdev_WARN() acts like dev_printk(), but with the key difference
  2265. * of using a WARN/WARN_ON to get the message out, including the
  2266. * file/line information and a backtrace.
  2267. */
  2268. #define netdev_WARN(dev, format, args...) \
  2269. WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
  2270. /* netif printk helpers, similar to netdev_printk */
  2271. #define netif_printk(priv, type, level, dev, fmt, args...) \
  2272. do { \
  2273. if (netif_msg_##type(priv)) \
  2274. netdev_printk(level, (dev), fmt, ##args); \
  2275. } while (0)
  2276. #define netif_level(level, priv, type, dev, fmt, args...) \
  2277. do { \
  2278. if (netif_msg_##type(priv)) \
  2279. netdev_##level(dev, fmt, ##args); \
  2280. } while (0)
  2281. #define netif_emerg(priv, type, dev, fmt, args...) \
  2282. netif_level(emerg, priv, type, dev, fmt, ##args)
  2283. #define netif_alert(priv, type, dev, fmt, args...) \
  2284. netif_level(alert, priv, type, dev, fmt, ##args)
  2285. #define netif_crit(priv, type, dev, fmt, args...) \
  2286. netif_level(crit, priv, type, dev, fmt, ##args)
  2287. #define netif_err(priv, type, dev, fmt, args...) \
  2288. netif_level(err, priv, type, dev, fmt, ##args)
  2289. #define netif_warn(priv, type, dev, fmt, args...) \
  2290. netif_level(warn, priv, type, dev, fmt, ##args)
  2291. #define netif_notice(priv, type, dev, fmt, args...) \
  2292. netif_level(notice, priv, type, dev, fmt, ##args)
  2293. #define netif_info(priv, type, dev, fmt, args...) \
  2294. netif_level(info, priv, type, dev, fmt, ##args)
  2295. #if defined(DEBUG)
  2296. #define netif_dbg(priv, type, dev, format, args...) \
  2297. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
  2298. #elif defined(CONFIG_DYNAMIC_DEBUG)
  2299. #define netif_dbg(priv, type, netdev, format, args...) \
  2300. do { \
  2301. if (netif_msg_##type(priv)) \
  2302. dynamic_netdev_dbg(netdev, format, ##args); \
  2303. } while (0)
  2304. #else
  2305. #define netif_dbg(priv, type, dev, format, args...) \
  2306. ({ \
  2307. if (0) \
  2308. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
  2309. 0; \
  2310. })
  2311. #endif
  2312. #if defined(VERBOSE_DEBUG)
  2313. #define netif_vdbg netif_dbg
  2314. #else
  2315. #define netif_vdbg(priv, type, dev, format, args...) \
  2316. ({ \
  2317. if (0) \
  2318. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
  2319. 0; \
  2320. })
  2321. #endif
  2322. #endif /* __KERNEL__ */
  2323. #endif /* _LINUX_NETDEVICE_H */