netdevice.h 83 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the Interfaces handler.
  7. *
  8. * Version: @(#)dev.h 1.0.10 08/12/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  13. * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
  14. * Alan Cox, <alan@lxorguk.ukuu.org.uk>
  15. * Bjorn Ekwall. <bj0rn@blox.se>
  16. * Pekka Riikonen <priikone@poseidon.pspt.fi>
  17. *
  18. * This program is free software; you can redistribute it and/or
  19. * modify it under the terms of the GNU General Public License
  20. * as published by the Free Software Foundation; either version
  21. * 2 of the License, or (at your option) any later version.
  22. *
  23. * Moved to /usr/include/linux for NET3
  24. */
  25. #ifndef _LINUX_NETDEVICE_H
  26. #define _LINUX_NETDEVICE_H
  27. #include <linux/if.h>
  28. #include <linux/if_ether.h>
  29. #include <linux/if_packet.h>
  30. #include <linux/if_link.h>
  31. #ifdef __KERNEL__
  32. #include <linux/pm_qos_params.h>
  33. #include <linux/timer.h>
  34. #include <linux/delay.h>
  35. #include <asm/atomic.h>
  36. #include <asm/cache.h>
  37. #include <asm/byteorder.h>
  38. #include <linux/device.h>
  39. #include <linux/percpu.h>
  40. #include <linux/rculist.h>
  41. #include <linux/dmaengine.h>
  42. #include <linux/workqueue.h>
  43. #include <linux/ethtool.h>
  44. #include <net/net_namespace.h>
  45. #include <net/dsa.h>
  46. #ifdef CONFIG_DCB
  47. #include <net/dcbnl.h>
  48. #endif
  49. struct vlan_group;
  50. struct netpoll_info;
  51. struct phy_device;
  52. /* 802.11 specific */
  53. struct wireless_dev;
  54. /* source back-compat hooks */
  55. #define SET_ETHTOOL_OPS(netdev,ops) \
  56. ( (netdev)->ethtool_ops = (ops) )
  57. /* hardware address assignment types */
  58. #define NET_ADDR_PERM 0 /* address is permanent (default) */
  59. #define NET_ADDR_RANDOM 1 /* address is generated randomly */
  60. #define NET_ADDR_STOLEN 2 /* address is stolen from other device */
  61. /* Backlog congestion levels */
  62. #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
  63. #define NET_RX_DROP 1 /* packet dropped */
  64. /*
  65. * Transmit return codes: transmit return codes originate from three different
  66. * namespaces:
  67. *
  68. * - qdisc return codes
  69. * - driver transmit return codes
  70. * - errno values
  71. *
  72. * Drivers are allowed to return any one of those in their hard_start_xmit()
  73. * function. Real network devices commonly used with qdiscs should only return
  74. * the driver transmit return codes though - when qdiscs are used, the actual
  75. * transmission happens asynchronously, so the value is not propagated to
  76. * higher layers. Virtual network devices transmit synchronously, in this case
  77. * the driver transmit return codes are consumed by dev_queue_xmit(), all
  78. * others are propagated to higher layers.
  79. */
  80. /* qdisc ->enqueue() return codes. */
  81. #define NET_XMIT_SUCCESS 0x00
  82. #define NET_XMIT_DROP 0x01 /* skb dropped */
  83. #define NET_XMIT_CN 0x02 /* congestion notification */
  84. #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
  85. #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
  86. /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  87. * indicates that the device will soon be dropping packets, or already drops
  88. * some packets of the same priority; prompting us to send less aggressively. */
  89. #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
  90. #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
  91. /* Driver transmit return codes */
  92. #define NETDEV_TX_MASK 0xf0
  93. enum netdev_tx {
  94. __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
  95. NETDEV_TX_OK = 0x00, /* driver took care of packet */
  96. NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
  97. NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
  98. };
  99. typedef enum netdev_tx netdev_tx_t;
  100. /*
  101. * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
  102. * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
  103. */
  104. static inline bool dev_xmit_complete(int rc)
  105. {
  106. /*
  107. * Positive cases with an skb consumed by a driver:
  108. * - successful transmission (rc == NETDEV_TX_OK)
  109. * - error while transmitting (rc < 0)
  110. * - error while queueing to a different device (rc & NET_XMIT_MASK)
  111. */
  112. if (likely(rc < NET_XMIT_MASK))
  113. return true;
  114. return false;
  115. }
  116. #endif
  117. #define MAX_ADDR_LEN 32 /* Largest hardware address length */
  118. /* Initial net device group. All devices belong to group 0 by default. */
  119. #define INIT_NETDEV_GROUP 0
  120. #ifdef __KERNEL__
  121. /*
  122. * Compute the worst case header length according to the protocols
  123. * used.
  124. */
  125. #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
  126. # if defined(CONFIG_MAC80211_MESH)
  127. # define LL_MAX_HEADER 128
  128. # else
  129. # define LL_MAX_HEADER 96
  130. # endif
  131. #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
  132. # define LL_MAX_HEADER 48
  133. #else
  134. # define LL_MAX_HEADER 32
  135. #endif
  136. #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
  137. !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
  138. !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
  139. !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
  140. #define MAX_HEADER LL_MAX_HEADER
  141. #else
  142. #define MAX_HEADER (LL_MAX_HEADER + 48)
  143. #endif
  144. /*
  145. * Old network device statistics. Fields are native words
  146. * (unsigned long) so they can be read and written atomically.
  147. */
  148. struct net_device_stats {
  149. unsigned long rx_packets;
  150. unsigned long tx_packets;
  151. unsigned long rx_bytes;
  152. unsigned long tx_bytes;
  153. unsigned long rx_errors;
  154. unsigned long tx_errors;
  155. unsigned long rx_dropped;
  156. unsigned long tx_dropped;
  157. unsigned long multicast;
  158. unsigned long collisions;
  159. unsigned long rx_length_errors;
  160. unsigned long rx_over_errors;
  161. unsigned long rx_crc_errors;
  162. unsigned long rx_frame_errors;
  163. unsigned long rx_fifo_errors;
  164. unsigned long rx_missed_errors;
  165. unsigned long tx_aborted_errors;
  166. unsigned long tx_carrier_errors;
  167. unsigned long tx_fifo_errors;
  168. unsigned long tx_heartbeat_errors;
  169. unsigned long tx_window_errors;
  170. unsigned long rx_compressed;
  171. unsigned long tx_compressed;
  172. };
  173. #endif /* __KERNEL__ */
  174. /* Media selection options. */
  175. enum {
  176. IF_PORT_UNKNOWN = 0,
  177. IF_PORT_10BASE2,
  178. IF_PORT_10BASET,
  179. IF_PORT_AUI,
  180. IF_PORT_100BASET,
  181. IF_PORT_100BASETX,
  182. IF_PORT_100BASEFX
  183. };
  184. #ifdef __KERNEL__
  185. #include <linux/cache.h>
  186. #include <linux/skbuff.h>
  187. struct neighbour;
  188. struct neigh_parms;
  189. struct sk_buff;
  190. struct netdev_hw_addr {
  191. struct list_head list;
  192. unsigned char addr[MAX_ADDR_LEN];
  193. unsigned char type;
  194. #define NETDEV_HW_ADDR_T_LAN 1
  195. #define NETDEV_HW_ADDR_T_SAN 2
  196. #define NETDEV_HW_ADDR_T_SLAVE 3
  197. #define NETDEV_HW_ADDR_T_UNICAST 4
  198. #define NETDEV_HW_ADDR_T_MULTICAST 5
  199. bool synced;
  200. bool global_use;
  201. int refcount;
  202. struct rcu_head rcu_head;
  203. };
  204. struct netdev_hw_addr_list {
  205. struct list_head list;
  206. int count;
  207. };
  208. #define netdev_hw_addr_list_count(l) ((l)->count)
  209. #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
  210. #define netdev_hw_addr_list_for_each(ha, l) \
  211. list_for_each_entry(ha, &(l)->list, list)
  212. #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
  213. #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
  214. #define netdev_for_each_uc_addr(ha, dev) \
  215. netdev_hw_addr_list_for_each(ha, &(dev)->uc)
  216. #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
  217. #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
  218. #define netdev_for_each_mc_addr(ha, dev) \
  219. netdev_hw_addr_list_for_each(ha, &(dev)->mc)
  220. struct hh_cache {
  221. u16 hh_len;
  222. u16 __pad;
  223. seqlock_t hh_lock;
  224. /* cached hardware header; allow for machine alignment needs. */
  225. #define HH_DATA_MOD 16
  226. #define HH_DATA_OFF(__len) \
  227. (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  228. #define HH_DATA_ALIGN(__len) \
  229. (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  230. unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  231. };
  232. /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  233. * Alternative is:
  234. * dev->hard_header_len ? (dev->hard_header_len +
  235. * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  236. *
  237. * We could use other alignment values, but we must maintain the
  238. * relationship HH alignment <= LL alignment.
  239. *
  240. * LL_ALLOCATED_SPACE also takes into account the tailroom the device
  241. * may need.
  242. */
  243. #define LL_RESERVED_SPACE(dev) \
  244. ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  245. #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  246. ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  247. #define LL_ALLOCATED_SPACE(dev) \
  248. ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  249. struct header_ops {
  250. int (*create) (struct sk_buff *skb, struct net_device *dev,
  251. unsigned short type, const void *daddr,
  252. const void *saddr, unsigned len);
  253. int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
  254. int (*rebuild)(struct sk_buff *skb);
  255. int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
  256. void (*cache_update)(struct hh_cache *hh,
  257. const struct net_device *dev,
  258. const unsigned char *haddr);
  259. };
  260. /* These flag bits are private to the generic network queueing
  261. * layer, they may not be explicitly referenced by any other
  262. * code.
  263. */
  264. enum netdev_state_t {
  265. __LINK_STATE_START,
  266. __LINK_STATE_PRESENT,
  267. __LINK_STATE_NOCARRIER,
  268. __LINK_STATE_LINKWATCH_PENDING,
  269. __LINK_STATE_DORMANT,
  270. };
  271. /*
  272. * This structure holds at boot time configured netdevice settings. They
  273. * are then used in the device probing.
  274. */
  275. struct netdev_boot_setup {
  276. char name[IFNAMSIZ];
  277. struct ifmap map;
  278. };
  279. #define NETDEV_BOOT_SETUP_MAX 8
  280. extern int __init netdev_boot_setup(char *str);
  281. /*
  282. * Structure for NAPI scheduling similar to tasklet but with weighting
  283. */
  284. struct napi_struct {
  285. /* The poll_list must only be managed by the entity which
  286. * changes the state of the NAPI_STATE_SCHED bit. This means
  287. * whoever atomically sets that bit can add this napi_struct
  288. * to the per-cpu poll_list, and whoever clears that bit
  289. * can remove from the list right before clearing the bit.
  290. */
  291. struct list_head poll_list;
  292. unsigned long state;
  293. int weight;
  294. int (*poll)(struct napi_struct *, int);
  295. #ifdef CONFIG_NETPOLL
  296. spinlock_t poll_lock;
  297. int poll_owner;
  298. #endif
  299. unsigned int gro_count;
  300. struct net_device *dev;
  301. struct list_head dev_list;
  302. struct sk_buff *gro_list;
  303. struct sk_buff *skb;
  304. };
  305. enum {
  306. NAPI_STATE_SCHED, /* Poll is scheduled */
  307. NAPI_STATE_DISABLE, /* Disable pending */
  308. NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
  309. };
  310. enum gro_result {
  311. GRO_MERGED,
  312. GRO_MERGED_FREE,
  313. GRO_HELD,
  314. GRO_NORMAL,
  315. GRO_DROP,
  316. };
  317. typedef enum gro_result gro_result_t;
  318. /*
  319. * enum rx_handler_result - Possible return values for rx_handlers.
  320. * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
  321. * further.
  322. * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
  323. * case skb->dev was changed by rx_handler.
  324. * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
  325. * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
  326. *
  327. * rx_handlers are functions called from inside __netif_receive_skb(), to do
  328. * special processing of the skb, prior to delivery to protocol handlers.
  329. *
  330. * Currently, a net_device can only have a single rx_handler registered. Trying
  331. * to register a second rx_handler will return -EBUSY.
  332. *
  333. * To register a rx_handler on a net_device, use netdev_rx_handler_register().
  334. * To unregister a rx_handler on a net_device, use
  335. * netdev_rx_handler_unregister().
  336. *
  337. * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
  338. * do with the skb.
  339. *
  340. * If the rx_handler consumed to skb in some way, it should return
  341. * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
  342. * the skb to be delivered in some other ways.
  343. *
  344. * If the rx_handler changed skb->dev, to divert the skb to another
  345. * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
  346. * new device will be called if it exists.
  347. *
  348. * If the rx_handler consider the skb should be ignored, it should return
  349. * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
  350. * are registred on exact device (ptype->dev == skb->dev).
  351. *
  352. * If the rx_handler didn't changed skb->dev, but want the skb to be normally
  353. * delivered, it should return RX_HANDLER_PASS.
  354. *
  355. * A device without a registered rx_handler will behave as if rx_handler
  356. * returned RX_HANDLER_PASS.
  357. */
  358. enum rx_handler_result {
  359. RX_HANDLER_CONSUMED,
  360. RX_HANDLER_ANOTHER,
  361. RX_HANDLER_EXACT,
  362. RX_HANDLER_PASS,
  363. };
  364. typedef enum rx_handler_result rx_handler_result_t;
  365. typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
  366. extern void __napi_schedule(struct napi_struct *n);
  367. static inline int napi_disable_pending(struct napi_struct *n)
  368. {
  369. return test_bit(NAPI_STATE_DISABLE, &n->state);
  370. }
  371. /**
  372. * napi_schedule_prep - check if napi can be scheduled
  373. * @n: napi context
  374. *
  375. * Test if NAPI routine is already running, and if not mark
  376. * it as running. This is used as a condition variable
  377. * insure only one NAPI poll instance runs. We also make
  378. * sure there is no pending NAPI disable.
  379. */
  380. static inline int napi_schedule_prep(struct napi_struct *n)
  381. {
  382. return !napi_disable_pending(n) &&
  383. !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  384. }
  385. /**
  386. * napi_schedule - schedule NAPI poll
  387. * @n: napi context
  388. *
  389. * Schedule NAPI poll routine to be called if it is not already
  390. * running.
  391. */
  392. static inline void napi_schedule(struct napi_struct *n)
  393. {
  394. if (napi_schedule_prep(n))
  395. __napi_schedule(n);
  396. }
  397. /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
  398. static inline int napi_reschedule(struct napi_struct *napi)
  399. {
  400. if (napi_schedule_prep(napi)) {
  401. __napi_schedule(napi);
  402. return 1;
  403. }
  404. return 0;
  405. }
  406. /**
  407. * napi_complete - NAPI processing complete
  408. * @n: napi context
  409. *
  410. * Mark NAPI processing as complete.
  411. */
  412. extern void __napi_complete(struct napi_struct *n);
  413. extern void napi_complete(struct napi_struct *n);
  414. /**
  415. * napi_disable - prevent NAPI from scheduling
  416. * @n: napi context
  417. *
  418. * Stop NAPI from being scheduled on this context.
  419. * Waits till any outstanding processing completes.
  420. */
  421. static inline void napi_disable(struct napi_struct *n)
  422. {
  423. set_bit(NAPI_STATE_DISABLE, &n->state);
  424. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  425. msleep(1);
  426. clear_bit(NAPI_STATE_DISABLE, &n->state);
  427. }
  428. /**
  429. * napi_enable - enable NAPI scheduling
  430. * @n: napi context
  431. *
  432. * Resume NAPI from being scheduled on this context.
  433. * Must be paired with napi_disable.
  434. */
  435. static inline void napi_enable(struct napi_struct *n)
  436. {
  437. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  438. smp_mb__before_clear_bit();
  439. clear_bit(NAPI_STATE_SCHED, &n->state);
  440. }
  441. #ifdef CONFIG_SMP
  442. /**
  443. * napi_synchronize - wait until NAPI is not running
  444. * @n: napi context
  445. *
  446. * Wait until NAPI is done being scheduled on this context.
  447. * Waits till any outstanding processing completes but
  448. * does not disable future activations.
  449. */
  450. static inline void napi_synchronize(const struct napi_struct *n)
  451. {
  452. while (test_bit(NAPI_STATE_SCHED, &n->state))
  453. msleep(1);
  454. }
  455. #else
  456. # define napi_synchronize(n) barrier()
  457. #endif
  458. enum netdev_queue_state_t {
  459. __QUEUE_STATE_XOFF,
  460. __QUEUE_STATE_FROZEN,
  461. #define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
  462. (1 << __QUEUE_STATE_FROZEN))
  463. };
  464. struct netdev_queue {
  465. /*
  466. * read mostly part
  467. */
  468. struct net_device *dev;
  469. struct Qdisc *qdisc;
  470. unsigned long state;
  471. struct Qdisc *qdisc_sleeping;
  472. #ifdef CONFIG_RPS
  473. struct kobject kobj;
  474. #endif
  475. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  476. int numa_node;
  477. #endif
  478. /*
  479. * write mostly part
  480. */
  481. spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
  482. int xmit_lock_owner;
  483. /*
  484. * please use this field instead of dev->trans_start
  485. */
  486. unsigned long trans_start;
  487. } ____cacheline_aligned_in_smp;
  488. static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
  489. {
  490. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  491. return q->numa_node;
  492. #else
  493. return NUMA_NO_NODE;
  494. #endif
  495. }
  496. static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
  497. {
  498. #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
  499. q->numa_node = node;
  500. #endif
  501. }
  502. #ifdef CONFIG_RPS
  503. /*
  504. * This structure holds an RPS map which can be of variable length. The
  505. * map is an array of CPUs.
  506. */
  507. struct rps_map {
  508. unsigned int len;
  509. struct rcu_head rcu;
  510. u16 cpus[0];
  511. };
  512. #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
  513. /*
  514. * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
  515. * tail pointer for that CPU's input queue at the time of last enqueue, and
  516. * a hardware filter index.
  517. */
  518. struct rps_dev_flow {
  519. u16 cpu;
  520. u16 filter;
  521. unsigned int last_qtail;
  522. };
  523. #define RPS_NO_FILTER 0xffff
  524. /*
  525. * The rps_dev_flow_table structure contains a table of flow mappings.
  526. */
  527. struct rps_dev_flow_table {
  528. unsigned int mask;
  529. struct rcu_head rcu;
  530. struct work_struct free_work;
  531. struct rps_dev_flow flows[0];
  532. };
  533. #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
  534. (_num * sizeof(struct rps_dev_flow)))
  535. /*
  536. * The rps_sock_flow_table contains mappings of flows to the last CPU
  537. * on which they were processed by the application (set in recvmsg).
  538. */
  539. struct rps_sock_flow_table {
  540. unsigned int mask;
  541. u16 ents[0];
  542. };
  543. #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
  544. (_num * sizeof(u16)))
  545. #define RPS_NO_CPU 0xffff
  546. static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
  547. u32 hash)
  548. {
  549. if (table && hash) {
  550. unsigned int cpu, index = hash & table->mask;
  551. /* We only give a hint, preemption can change cpu under us */
  552. cpu = raw_smp_processor_id();
  553. if (table->ents[index] != cpu)
  554. table->ents[index] = cpu;
  555. }
  556. }
  557. static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
  558. u32 hash)
  559. {
  560. if (table && hash)
  561. table->ents[hash & table->mask] = RPS_NO_CPU;
  562. }
  563. extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
  564. #ifdef CONFIG_RFS_ACCEL
  565. extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
  566. u32 flow_id, u16 filter_id);
  567. #endif
  568. /* This structure contains an instance of an RX queue. */
  569. struct netdev_rx_queue {
  570. struct rps_map __rcu *rps_map;
  571. struct rps_dev_flow_table __rcu *rps_flow_table;
  572. struct kobject kobj;
  573. struct net_device *dev;
  574. } ____cacheline_aligned_in_smp;
  575. #endif /* CONFIG_RPS */
  576. #ifdef CONFIG_XPS
  577. /*
  578. * This structure holds an XPS map which can be of variable length. The
  579. * map is an array of queues.
  580. */
  581. struct xps_map {
  582. unsigned int len;
  583. unsigned int alloc_len;
  584. struct rcu_head rcu;
  585. u16 queues[0];
  586. };
  587. #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
  588. #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
  589. / sizeof(u16))
  590. /*
  591. * This structure holds all XPS maps for device. Maps are indexed by CPU.
  592. */
  593. struct xps_dev_maps {
  594. struct rcu_head rcu;
  595. struct xps_map __rcu *cpu_map[0];
  596. };
  597. #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
  598. (nr_cpu_ids * sizeof(struct xps_map *)))
  599. #endif /* CONFIG_XPS */
  600. #define TC_MAX_QUEUE 16
  601. #define TC_BITMASK 15
  602. /* HW offloaded queuing disciplines txq count and offset maps */
  603. struct netdev_tc_txq {
  604. u16 count;
  605. u16 offset;
  606. };
  607. /*
  608. * This structure defines the management hooks for network devices.
  609. * The following hooks can be defined; unless noted otherwise, they are
  610. * optional and can be filled with a null pointer.
  611. *
  612. * int (*ndo_init)(struct net_device *dev);
  613. * This function is called once when network device is registered.
  614. * The network device can use this to any late stage initializaton
  615. * or semantic validattion. It can fail with an error code which will
  616. * be propogated back to register_netdev
  617. *
  618. * void (*ndo_uninit)(struct net_device *dev);
  619. * This function is called when device is unregistered or when registration
  620. * fails. It is not called if init fails.
  621. *
  622. * int (*ndo_open)(struct net_device *dev);
  623. * This function is called when network device transistions to the up
  624. * state.
  625. *
  626. * int (*ndo_stop)(struct net_device *dev);
  627. * This function is called when network device transistions to the down
  628. * state.
  629. *
  630. * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
  631. * struct net_device *dev);
  632. * Called when a packet needs to be transmitted.
  633. * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
  634. * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  635. * Required can not be NULL.
  636. *
  637. * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
  638. * Called to decide which queue to when device supports multiple
  639. * transmit queues.
  640. *
  641. * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
  642. * This function is called to allow device receiver to make
  643. * changes to configuration when multicast or promiscious is enabled.
  644. *
  645. * void (*ndo_set_rx_mode)(struct net_device *dev);
  646. * This function is called device changes address list filtering.
  647. *
  648. * void (*ndo_set_multicast_list)(struct net_device *dev);
  649. * This function is called when the multicast address list changes.
  650. *
  651. * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  652. * This function is called when the Media Access Control address
  653. * needs to be changed. If this interface is not defined, the
  654. * mac address can not be changed.
  655. *
  656. * int (*ndo_validate_addr)(struct net_device *dev);
  657. * Test if Media Access Control address is valid for the device.
  658. *
  659. * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
  660. * Called when a user request an ioctl which can't be handled by
  661. * the generic interface code. If not defined ioctl's return
  662. * not supported error code.
  663. *
  664. * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
  665. * Used to set network devices bus interface parameters. This interface
  666. * is retained for legacy reason, new devices should use the bus
  667. * interface (PCI) for low level management.
  668. *
  669. * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  670. * Called when a user wants to change the Maximum Transfer Unit
  671. * of a device. If not defined, any request to change MTU will
  672. * will return an error.
  673. *
  674. * void (*ndo_tx_timeout)(struct net_device *dev);
  675. * Callback uses when the transmitter has not made any progress
  676. * for dev->watchdog ticks.
  677. *
  678. * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  679. * struct rtnl_link_stats64 *storage);
  680. * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  681. * Called when a user wants to get the network device usage
  682. * statistics. Drivers must do one of the following:
  683. * 1. Define @ndo_get_stats64 to fill in a zero-initialised
  684. * rtnl_link_stats64 structure passed by the caller.
  685. * 2. Define @ndo_get_stats to update a net_device_stats structure
  686. * (which should normally be dev->stats) and return a pointer to
  687. * it. The structure may be changed asynchronously only if each
  688. * field is written atomically.
  689. * 3. Update dev->stats asynchronously and atomically, and define
  690. * neither operation.
  691. *
  692. * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
  693. * If device support VLAN receive acceleration
  694. * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
  695. * when vlan groups for the device changes. Note: grp is NULL
  696. * if no vlan's groups are being used.
  697. *
  698. * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  699. * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  700. * this function is called when a VLAN id is registered.
  701. *
  702. * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
  703. * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  704. * this function is called when a VLAN id is unregistered.
  705. *
  706. * void (*ndo_poll_controller)(struct net_device *dev);
  707. *
  708. * SR-IOV management functions.
  709. * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
  710. * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
  711. * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
  712. * int (*ndo_get_vf_config)(struct net_device *dev,
  713. * int vf, struct ifla_vf_info *ivf);
  714. * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  715. * struct nlattr *port[]);
  716. * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
  717. * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
  718. * Called to setup 'tc' number of traffic classes in the net device. This
  719. * is always called from the stack with the rtnl lock held and netif tx
  720. * queues stopped. This allows the netdevice to perform queue management
  721. * safely.
  722. *
  723. * Fiber Channel over Ethernet (FCoE) offload functions.
  724. * int (*ndo_fcoe_enable)(struct net_device *dev);
  725. * Called when the FCoE protocol stack wants to start using LLD for FCoE
  726. * so the underlying device can perform whatever needed configuration or
  727. * initialization to support acceleration of FCoE traffic.
  728. *
  729. * int (*ndo_fcoe_disable)(struct net_device *dev);
  730. * Called when the FCoE protocol stack wants to stop using LLD for FCoE
  731. * so the underlying device can perform whatever needed clean-ups to
  732. * stop supporting acceleration of FCoE traffic.
  733. *
  734. * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
  735. * struct scatterlist *sgl, unsigned int sgc);
  736. * Called when the FCoE Initiator wants to initialize an I/O that
  737. * is a possible candidate for Direct Data Placement (DDP). The LLD can
  738. * perform necessary setup and returns 1 to indicate the device is set up
  739. * successfully to perform DDP on this I/O, otherwise this returns 0.
  740. *
  741. * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
  742. * Called when the FCoE Initiator/Target is done with the DDPed I/O as
  743. * indicated by the FC exchange id 'xid', so the underlying device can
  744. * clean up and reuse resources for later DDP requests.
  745. *
  746. * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
  747. * struct scatterlist *sgl, unsigned int sgc);
  748. * Called when the FCoE Target wants to initialize an I/O that
  749. * is a possible candidate for Direct Data Placement (DDP). The LLD can
  750. * perform necessary setup and returns 1 to indicate the device is set up
  751. * successfully to perform DDP on this I/O, otherwise this returns 0.
  752. *
  753. * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
  754. * Called when the underlying device wants to override default World Wide
  755. * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
  756. * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
  757. * protocol stack to use.
  758. *
  759. * RFS acceleration.
  760. * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
  761. * u16 rxq_index, u32 flow_id);
  762. * Set hardware filter for RFS. rxq_index is the target queue index;
  763. * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
  764. * Return the filter ID on success, or a negative error code.
  765. *
  766. * Slave management functions (for bridge, bonding, etc). User should
  767. * call netdev_set_master() to set dev->master properly.
  768. * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
  769. * Called to make another netdev an underling.
  770. *
  771. * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
  772. * Called to release previously enslaved netdev.
  773. *
  774. * Feature/offload setting functions.
  775. * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
  776. * Adjusts the requested feature flags according to device-specific
  777. * constraints, and returns the resulting flags. Must not modify
  778. * the device state.
  779. *
  780. * int (*ndo_set_features)(struct net_device *dev, u32 features);
  781. * Called to update device configuration to new features. Passed
  782. * feature set might be less than what was returned by ndo_fix_features()).
  783. * Must return >0 or -errno if it changed dev->features itself.
  784. *
  785. */
  786. struct net_device_ops {
  787. int (*ndo_init)(struct net_device *dev);
  788. void (*ndo_uninit)(struct net_device *dev);
  789. int (*ndo_open)(struct net_device *dev);
  790. int (*ndo_stop)(struct net_device *dev);
  791. netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
  792. struct net_device *dev);
  793. u16 (*ndo_select_queue)(struct net_device *dev,
  794. struct sk_buff *skb);
  795. void (*ndo_change_rx_flags)(struct net_device *dev,
  796. int flags);
  797. void (*ndo_set_rx_mode)(struct net_device *dev);
  798. void (*ndo_set_multicast_list)(struct net_device *dev);
  799. int (*ndo_set_mac_address)(struct net_device *dev,
  800. void *addr);
  801. int (*ndo_validate_addr)(struct net_device *dev);
  802. int (*ndo_do_ioctl)(struct net_device *dev,
  803. struct ifreq *ifr, int cmd);
  804. int (*ndo_set_config)(struct net_device *dev,
  805. struct ifmap *map);
  806. int (*ndo_change_mtu)(struct net_device *dev,
  807. int new_mtu);
  808. int (*ndo_neigh_setup)(struct net_device *dev,
  809. struct neigh_parms *);
  810. void (*ndo_tx_timeout) (struct net_device *dev);
  811. struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
  812. struct rtnl_link_stats64 *storage);
  813. struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  814. void (*ndo_vlan_rx_register)(struct net_device *dev,
  815. struct vlan_group *grp);
  816. void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
  817. unsigned short vid);
  818. void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
  819. unsigned short vid);
  820. #ifdef CONFIG_NET_POLL_CONTROLLER
  821. void (*ndo_poll_controller)(struct net_device *dev);
  822. int (*ndo_netpoll_setup)(struct net_device *dev,
  823. struct netpoll_info *info);
  824. void (*ndo_netpoll_cleanup)(struct net_device *dev);
  825. #endif
  826. int (*ndo_set_vf_mac)(struct net_device *dev,
  827. int queue, u8 *mac);
  828. int (*ndo_set_vf_vlan)(struct net_device *dev,
  829. int queue, u16 vlan, u8 qos);
  830. int (*ndo_set_vf_tx_rate)(struct net_device *dev,
  831. int vf, int rate);
  832. int (*ndo_get_vf_config)(struct net_device *dev,
  833. int vf,
  834. struct ifla_vf_info *ivf);
  835. int (*ndo_set_vf_port)(struct net_device *dev,
  836. int vf,
  837. struct nlattr *port[]);
  838. int (*ndo_get_vf_port)(struct net_device *dev,
  839. int vf, struct sk_buff *skb);
  840. int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
  841. #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  842. int (*ndo_fcoe_enable)(struct net_device *dev);
  843. int (*ndo_fcoe_disable)(struct net_device *dev);
  844. int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
  845. u16 xid,
  846. struct scatterlist *sgl,
  847. unsigned int sgc);
  848. int (*ndo_fcoe_ddp_done)(struct net_device *dev,
  849. u16 xid);
  850. int (*ndo_fcoe_ddp_target)(struct net_device *dev,
  851. u16 xid,
  852. struct scatterlist *sgl,
  853. unsigned int sgc);
  854. #define NETDEV_FCOE_WWNN 0
  855. #define NETDEV_FCOE_WWPN 1
  856. int (*ndo_fcoe_get_wwn)(struct net_device *dev,
  857. u64 *wwn, int type);
  858. #endif
  859. #ifdef CONFIG_RFS_ACCEL
  860. int (*ndo_rx_flow_steer)(struct net_device *dev,
  861. const struct sk_buff *skb,
  862. u16 rxq_index,
  863. u32 flow_id);
  864. #endif
  865. int (*ndo_add_slave)(struct net_device *dev,
  866. struct net_device *slave_dev);
  867. int (*ndo_del_slave)(struct net_device *dev,
  868. struct net_device *slave_dev);
  869. u32 (*ndo_fix_features)(struct net_device *dev,
  870. u32 features);
  871. int (*ndo_set_features)(struct net_device *dev,
  872. u32 features);
  873. };
  874. /*
  875. * The DEVICE structure.
  876. * Actually, this whole structure is a big mistake. It mixes I/O
  877. * data with strictly "high-level" data, and it has to know about
  878. * almost every data structure used in the INET module.
  879. *
  880. * FIXME: cleanup struct net_device such that network protocol info
  881. * moves out.
  882. */
  883. struct net_device {
  884. /*
  885. * This is the first field of the "visible" part of this structure
  886. * (i.e. as seen by users in the "Space.c" file). It is the name
  887. * of the interface.
  888. */
  889. char name[IFNAMSIZ];
  890. struct pm_qos_request_list pm_qos_req;
  891. /* device name hash chain */
  892. struct hlist_node name_hlist;
  893. /* snmp alias */
  894. char *ifalias;
  895. /*
  896. * I/O specific fields
  897. * FIXME: Merge these and struct ifmap into one
  898. */
  899. unsigned long mem_end; /* shared mem end */
  900. unsigned long mem_start; /* shared mem start */
  901. unsigned long base_addr; /* device I/O address */
  902. unsigned int irq; /* device IRQ number */
  903. /*
  904. * Some hardware also needs these fields, but they are not
  905. * part of the usual set specified in Space.c.
  906. */
  907. unsigned long state;
  908. struct list_head dev_list;
  909. struct list_head napi_list;
  910. struct list_head unreg_list;
  911. /* currently active device features */
  912. u32 features;
  913. /* user-changeable features */
  914. u32 hw_features;
  915. /* user-requested features */
  916. u32 wanted_features;
  917. /* mask of features inheritable by VLAN devices */
  918. u32 vlan_features;
  919. /* Net device feature bits; if you change something,
  920. * also update netdev_features_strings[] in ethtool.c */
  921. #define NETIF_F_SG 1 /* Scatter/gather IO. */
  922. #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
  923. #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
  924. #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
  925. #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
  926. #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
  927. #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
  928. #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
  929. #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
  930. #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
  931. #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
  932. #define NETIF_F_GSO 2048 /* Enable software GSO. */
  933. #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
  934. /* do not use LLTX in new drivers */
  935. #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
  936. #define NETIF_F_GRO 16384 /* Generic receive offload */
  937. #define NETIF_F_LRO 32768 /* large receive offload */
  938. /* the GSO_MASK reserves bits 16 through 23 */
  939. #define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
  940. #define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
  941. #define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
  942. #define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
  943. #define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
  944. #define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
  945. #define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */
  946. #define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */
  947. /* Segmentation offload features */
  948. #define NETIF_F_GSO_SHIFT 16
  949. #define NETIF_F_GSO_MASK 0x00ff0000
  950. #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  951. #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
  952. #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  953. #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
  954. #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
  955. #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
  956. /* Features valid for ethtool to change */
  957. /* = all defined minus driver/device-class-related */
  958. #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
  959. NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
  960. #define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
  961. /* List of features with software fallbacks. */
  962. #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
  963. NETIF_F_TSO6 | NETIF_F_UFO)
  964. #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  965. #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
  966. #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
  967. #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
  968. #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
  969. #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
  970. NETIF_F_FSO)
  971. /*
  972. * If one device supports one of these features, then enable them
  973. * for all in netdev_increment_features.
  974. */
  975. #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
  976. NETIF_F_SG | NETIF_F_HIGHDMA | \
  977. NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
  978. /*
  979. * If one device doesn't support one of these features, then disable it
  980. * for all in netdev_increment_features.
  981. */
  982. #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
  983. /* changeable features with no special hardware requirements */
  984. #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
  985. /* Interface index. Unique device identifier */
  986. int ifindex;
  987. int iflink;
  988. struct net_device_stats stats;
  989. atomic_long_t rx_dropped; /* dropped packets by core network
  990. * Do not use this in drivers.
  991. */
  992. #ifdef CONFIG_WIRELESS_EXT
  993. /* List of functions to handle Wireless Extensions (instead of ioctl).
  994. * See <net/iw_handler.h> for details. Jean II */
  995. const struct iw_handler_def * wireless_handlers;
  996. /* Instance data managed by the core of Wireless Extensions. */
  997. struct iw_public_data * wireless_data;
  998. #endif
  999. /* Management operations */
  1000. const struct net_device_ops *netdev_ops;
  1001. const struct ethtool_ops *ethtool_ops;
  1002. /* Hardware header description */
  1003. const struct header_ops *header_ops;
  1004. unsigned int flags; /* interface flags (a la BSD) */
  1005. unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
  1006. unsigned short gflags;
  1007. unsigned short padded; /* How much padding added by alloc_netdev() */
  1008. unsigned char operstate; /* RFC2863 operstate */
  1009. unsigned char link_mode; /* mapping policy to operstate */
  1010. unsigned char if_port; /* Selectable AUI, TP,..*/
  1011. unsigned char dma; /* DMA channel */
  1012. unsigned int mtu; /* interface MTU value */
  1013. unsigned short type; /* interface hardware type */
  1014. unsigned short hard_header_len; /* hardware hdr length */
  1015. /* extra head- and tailroom the hardware may need, but not in all cases
  1016. * can this be guaranteed, especially tailroom. Some cases also use
  1017. * LL_MAX_HEADER instead to allocate the skb.
  1018. */
  1019. unsigned short needed_headroom;
  1020. unsigned short needed_tailroom;
  1021. /* Interface address info. */
  1022. unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
  1023. unsigned char addr_assign_type; /* hw address assignment type */
  1024. unsigned char addr_len; /* hardware address length */
  1025. unsigned short dev_id; /* for shared network cards */
  1026. spinlock_t addr_list_lock;
  1027. struct netdev_hw_addr_list uc; /* Unicast mac addresses */
  1028. struct netdev_hw_addr_list mc; /* Multicast mac addresses */
  1029. int uc_promisc;
  1030. unsigned int promiscuity;
  1031. unsigned int allmulti;
  1032. /* Protocol specific pointers */
  1033. #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  1034. struct vlan_group __rcu *vlgrp; /* VLAN group */
  1035. #endif
  1036. #ifdef CONFIG_NET_DSA
  1037. void *dsa_ptr; /* dsa specific data */
  1038. #endif
  1039. void *atalk_ptr; /* AppleTalk link */
  1040. struct in_device __rcu *ip_ptr; /* IPv4 specific data */
  1041. struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
  1042. struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
  1043. void *ec_ptr; /* Econet specific data */
  1044. void *ax25_ptr; /* AX.25 specific data */
  1045. struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
  1046. assign before registering */
  1047. /*
  1048. * Cache lines mostly used on receive path (including eth_type_trans())
  1049. */
  1050. unsigned long last_rx; /* Time of last Rx
  1051. * This should not be set in
  1052. * drivers, unless really needed,
  1053. * because network stack (bonding)
  1054. * use it if/when necessary, to
  1055. * avoid dirtying this cache line.
  1056. */
  1057. struct net_device *master; /* Pointer to master device of a group,
  1058. * which this device is member of.
  1059. */
  1060. /* Interface address info used in eth_type_trans() */
  1061. unsigned char *dev_addr; /* hw address, (before bcast
  1062. because most packets are
  1063. unicast) */
  1064. struct netdev_hw_addr_list dev_addrs; /* list of device
  1065. hw addresses */
  1066. unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
  1067. #ifdef CONFIG_RPS
  1068. struct kset *queues_kset;
  1069. struct netdev_rx_queue *_rx;
  1070. /* Number of RX queues allocated at register_netdev() time */
  1071. unsigned int num_rx_queues;
  1072. /* Number of RX queues currently active in device */
  1073. unsigned int real_num_rx_queues;
  1074. #ifdef CONFIG_RFS_ACCEL
  1075. /* CPU reverse-mapping for RX completion interrupts, indexed
  1076. * by RX queue number. Assigned by driver. This must only be
  1077. * set if the ndo_rx_flow_steer operation is defined. */
  1078. struct cpu_rmap *rx_cpu_rmap;
  1079. #endif
  1080. #endif
  1081. rx_handler_func_t __rcu *rx_handler;
  1082. void __rcu *rx_handler_data;
  1083. struct netdev_queue __rcu *ingress_queue;
  1084. /*
  1085. * Cache lines mostly used on transmit path
  1086. */
  1087. struct netdev_queue *_tx ____cacheline_aligned_in_smp;
  1088. /* Number of TX queues allocated at alloc_netdev_mq() time */
  1089. unsigned int num_tx_queues;
  1090. /* Number of TX queues currently active in device */
  1091. unsigned int real_num_tx_queues;
  1092. /* root qdisc from userspace point of view */
  1093. struct Qdisc *qdisc;
  1094. unsigned long tx_queue_len; /* Max frames per queue allowed */
  1095. spinlock_t tx_global_lock;
  1096. #ifdef CONFIG_XPS
  1097. struct xps_dev_maps __rcu *xps_maps;
  1098. #endif
  1099. /* These may be needed for future network-power-down code. */
  1100. /*
  1101. * trans_start here is expensive for high speed devices on SMP,
  1102. * please use netdev_queue->trans_start instead.
  1103. */
  1104. unsigned long trans_start; /* Time (in jiffies) of last Tx */
  1105. int watchdog_timeo; /* used by dev_watchdog() */
  1106. struct timer_list watchdog_timer;
  1107. /* Number of references to this device */
  1108. int __percpu *pcpu_refcnt;
  1109. /* delayed register/unregister */
  1110. struct list_head todo_list;
  1111. /* device index hash chain */
  1112. struct hlist_node index_hlist;
  1113. struct list_head link_watch_list;
  1114. /* register/unregister state machine */
  1115. enum { NETREG_UNINITIALIZED=0,
  1116. NETREG_REGISTERED, /* completed register_netdevice */
  1117. NETREG_UNREGISTERING, /* called unregister_netdevice */
  1118. NETREG_UNREGISTERED, /* completed unregister todo */
  1119. NETREG_RELEASED, /* called free_netdev */
  1120. NETREG_DUMMY, /* dummy device for NAPI poll */
  1121. } reg_state:8;
  1122. bool dismantle; /* device is going do be freed */
  1123. enum {
  1124. RTNL_LINK_INITIALIZED,
  1125. RTNL_LINK_INITIALIZING,
  1126. } rtnl_link_state:16;
  1127. /* Called from unregister, can be used to call free_netdev */
  1128. void (*destructor)(struct net_device *dev);
  1129. #ifdef CONFIG_NETPOLL
  1130. struct netpoll_info *npinfo;
  1131. #endif
  1132. #ifdef CONFIG_NET_NS
  1133. /* Network namespace this network device is inside */
  1134. struct net *nd_net;
  1135. #endif
  1136. /* mid-layer private */
  1137. union {
  1138. void *ml_priv;
  1139. struct pcpu_lstats __percpu *lstats; /* loopback stats */
  1140. struct pcpu_tstats __percpu *tstats; /* tunnel stats */
  1141. struct pcpu_dstats __percpu *dstats; /* dummy stats */
  1142. };
  1143. /* GARP */
  1144. struct garp_port __rcu *garp_port;
  1145. /* class/net/name entry */
  1146. struct device dev;
  1147. /* space for optional device, statistics, and wireless sysfs groups */
  1148. const struct attribute_group *sysfs_groups[4];
  1149. /* rtnetlink link ops */
  1150. const struct rtnl_link_ops *rtnl_link_ops;
  1151. /* for setting kernel sock attribute on TCP connection setup */
  1152. #define GSO_MAX_SIZE 65536
  1153. unsigned int gso_max_size;
  1154. #ifdef CONFIG_DCB
  1155. /* Data Center Bridging netlink ops */
  1156. const struct dcbnl_rtnl_ops *dcbnl_ops;
  1157. #endif
  1158. u8 num_tc;
  1159. struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
  1160. u8 prio_tc_map[TC_BITMASK + 1];
  1161. #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
  1162. /* max exchange id for FCoE LRO by ddp */
  1163. unsigned int fcoe_ddp_xid;
  1164. #endif
  1165. /* phy device may attach itself for hardware timestamping */
  1166. struct phy_device *phydev;
  1167. /* group the device belongs to */
  1168. int group;
  1169. };
  1170. #define to_net_dev(d) container_of(d, struct net_device, dev)
  1171. #define NETDEV_ALIGN 32
  1172. static inline
  1173. int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
  1174. {
  1175. return dev->prio_tc_map[prio & TC_BITMASK];
  1176. }
  1177. static inline
  1178. int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
  1179. {
  1180. if (tc >= dev->num_tc)
  1181. return -EINVAL;
  1182. dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
  1183. return 0;
  1184. }
  1185. static inline
  1186. void netdev_reset_tc(struct net_device *dev)
  1187. {
  1188. dev->num_tc = 0;
  1189. memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
  1190. memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
  1191. }
  1192. static inline
  1193. int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
  1194. {
  1195. if (tc >= dev->num_tc)
  1196. return -EINVAL;
  1197. dev->tc_to_txq[tc].count = count;
  1198. dev->tc_to_txq[tc].offset = offset;
  1199. return 0;
  1200. }
  1201. static inline
  1202. int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
  1203. {
  1204. if (num_tc > TC_MAX_QUEUE)
  1205. return -EINVAL;
  1206. dev->num_tc = num_tc;
  1207. return 0;
  1208. }
  1209. static inline
  1210. int netdev_get_num_tc(struct net_device *dev)
  1211. {
  1212. return dev->num_tc;
  1213. }
  1214. static inline
  1215. struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
  1216. unsigned int index)
  1217. {
  1218. return &dev->_tx[index];
  1219. }
  1220. static inline void netdev_for_each_tx_queue(struct net_device *dev,
  1221. void (*f)(struct net_device *,
  1222. struct netdev_queue *,
  1223. void *),
  1224. void *arg)
  1225. {
  1226. unsigned int i;
  1227. for (i = 0; i < dev->num_tx_queues; i++)
  1228. f(dev, &dev->_tx[i], arg);
  1229. }
  1230. /*
  1231. * Net namespace inlines
  1232. */
  1233. static inline
  1234. struct net *dev_net(const struct net_device *dev)
  1235. {
  1236. return read_pnet(&dev->nd_net);
  1237. }
  1238. static inline
  1239. void dev_net_set(struct net_device *dev, struct net *net)
  1240. {
  1241. #ifdef CONFIG_NET_NS
  1242. release_net(dev->nd_net);
  1243. dev->nd_net = hold_net(net);
  1244. #endif
  1245. }
  1246. static inline bool netdev_uses_dsa_tags(struct net_device *dev)
  1247. {
  1248. #ifdef CONFIG_NET_DSA_TAG_DSA
  1249. if (dev->dsa_ptr != NULL)
  1250. return dsa_uses_dsa_tags(dev->dsa_ptr);
  1251. #endif
  1252. return 0;
  1253. }
  1254. #ifndef CONFIG_NET_NS
  1255. static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
  1256. {
  1257. skb->dev = dev;
  1258. }
  1259. #else /* CONFIG_NET_NS */
  1260. void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
  1261. #endif
  1262. static inline bool netdev_uses_trailer_tags(struct net_device *dev)
  1263. {
  1264. #ifdef CONFIG_NET_DSA_TAG_TRAILER
  1265. if (dev->dsa_ptr != NULL)
  1266. return dsa_uses_trailer_tags(dev->dsa_ptr);
  1267. #endif
  1268. return 0;
  1269. }
  1270. /**
  1271. * netdev_priv - access network device private data
  1272. * @dev: network device
  1273. *
  1274. * Get network device private data
  1275. */
  1276. static inline void *netdev_priv(const struct net_device *dev)
  1277. {
  1278. return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
  1279. }
  1280. /* Set the sysfs physical device reference for the network logical device
  1281. * if set prior to registration will cause a symlink during initialization.
  1282. */
  1283. #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
  1284. /* Set the sysfs device type for the network logical device to allow
  1285. * fin grained indentification of different network device types. For
  1286. * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
  1287. */
  1288. #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
  1289. /**
  1290. * netif_napi_add - initialize a napi context
  1291. * @dev: network device
  1292. * @napi: napi context
  1293. * @poll: polling function
  1294. * @weight: default weight
  1295. *
  1296. * netif_napi_add() must be used to initialize a napi context prior to calling
  1297. * *any* of the other napi related functions.
  1298. */
  1299. void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
  1300. int (*poll)(struct napi_struct *, int), int weight);
  1301. /**
  1302. * netif_napi_del - remove a napi context
  1303. * @napi: napi context
  1304. *
  1305. * netif_napi_del() removes a napi context from the network device napi list
  1306. */
  1307. void netif_napi_del(struct napi_struct *napi);
  1308. struct napi_gro_cb {
  1309. /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
  1310. void *frag0;
  1311. /* Length of frag0. */
  1312. unsigned int frag0_len;
  1313. /* This indicates where we are processing relative to skb->data. */
  1314. int data_offset;
  1315. /* This is non-zero if the packet may be of the same flow. */
  1316. int same_flow;
  1317. /* This is non-zero if the packet cannot be merged with the new skb. */
  1318. int flush;
  1319. /* Number of segments aggregated. */
  1320. int count;
  1321. /* Free the skb? */
  1322. int free;
  1323. };
  1324. #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
  1325. struct packet_type {
  1326. __be16 type; /* This is really htons(ether_type). */
  1327. struct net_device *dev; /* NULL is wildcarded here */
  1328. int (*func) (struct sk_buff *,
  1329. struct net_device *,
  1330. struct packet_type *,
  1331. struct net_device *);
  1332. struct sk_buff *(*gso_segment)(struct sk_buff *skb,
  1333. u32 features);
  1334. int (*gso_send_check)(struct sk_buff *skb);
  1335. struct sk_buff **(*gro_receive)(struct sk_buff **head,
  1336. struct sk_buff *skb);
  1337. int (*gro_complete)(struct sk_buff *skb);
  1338. void *af_packet_priv;
  1339. struct list_head list;
  1340. };
  1341. #include <linux/notifier.h>
  1342. extern rwlock_t dev_base_lock; /* Device list lock */
  1343. #define for_each_netdev(net, d) \
  1344. list_for_each_entry(d, &(net)->dev_base_head, dev_list)
  1345. #define for_each_netdev_reverse(net, d) \
  1346. list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
  1347. #define for_each_netdev_rcu(net, d) \
  1348. list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
  1349. #define for_each_netdev_safe(net, d, n) \
  1350. list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
  1351. #define for_each_netdev_continue(net, d) \
  1352. list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
  1353. #define for_each_netdev_continue_rcu(net, d) \
  1354. list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
  1355. #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
  1356. static inline struct net_device *next_net_device(struct net_device *dev)
  1357. {
  1358. struct list_head *lh;
  1359. struct net *net;
  1360. net = dev_net(dev);
  1361. lh = dev->dev_list.next;
  1362. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1363. }
  1364. static inline struct net_device *next_net_device_rcu(struct net_device *dev)
  1365. {
  1366. struct list_head *lh;
  1367. struct net *net;
  1368. net = dev_net(dev);
  1369. lh = rcu_dereference(list_next_rcu(&dev->dev_list));
  1370. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1371. }
  1372. static inline struct net_device *first_net_device(struct net *net)
  1373. {
  1374. return list_empty(&net->dev_base_head) ? NULL :
  1375. net_device_entry(net->dev_base_head.next);
  1376. }
  1377. static inline struct net_device *first_net_device_rcu(struct net *net)
  1378. {
  1379. struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
  1380. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  1381. }
  1382. extern int netdev_boot_setup_check(struct net_device *dev);
  1383. extern unsigned long netdev_boot_base(const char *prefix, int unit);
  1384. extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
  1385. const char *hwaddr);
  1386. extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
  1387. extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
  1388. extern void dev_add_pack(struct packet_type *pt);
  1389. extern void dev_remove_pack(struct packet_type *pt);
  1390. extern void __dev_remove_pack(struct packet_type *pt);
  1391. extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
  1392. unsigned short mask);
  1393. extern struct net_device *dev_get_by_name(struct net *net, const char *name);
  1394. extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
  1395. extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
  1396. extern int dev_alloc_name(struct net_device *dev, const char *name);
  1397. extern int dev_open(struct net_device *dev);
  1398. extern int dev_close(struct net_device *dev);
  1399. extern void dev_disable_lro(struct net_device *dev);
  1400. extern int dev_queue_xmit(struct sk_buff *skb);
  1401. extern int register_netdevice(struct net_device *dev);
  1402. extern void unregister_netdevice_queue(struct net_device *dev,
  1403. struct list_head *head);
  1404. extern void unregister_netdevice_many(struct list_head *head);
  1405. static inline void unregister_netdevice(struct net_device *dev)
  1406. {
  1407. unregister_netdevice_queue(dev, NULL);
  1408. }
  1409. extern int netdev_refcnt_read(const struct net_device *dev);
  1410. extern void free_netdev(struct net_device *dev);
  1411. extern void synchronize_net(void);
  1412. extern int register_netdevice_notifier(struct notifier_block *nb);
  1413. extern int unregister_netdevice_notifier(struct notifier_block *nb);
  1414. extern int init_dummy_netdev(struct net_device *dev);
  1415. extern void netdev_resync_ops(struct net_device *dev);
  1416. extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  1417. extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
  1418. extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  1419. extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
  1420. extern int dev_restart(struct net_device *dev);
  1421. #ifdef CONFIG_NETPOLL_TRAP
  1422. extern int netpoll_trap(void);
  1423. #endif
  1424. extern int skb_gro_receive(struct sk_buff **head,
  1425. struct sk_buff *skb);
  1426. extern void skb_gro_reset_offset(struct sk_buff *skb);
  1427. static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
  1428. {
  1429. return NAPI_GRO_CB(skb)->data_offset;
  1430. }
  1431. static inline unsigned int skb_gro_len(const struct sk_buff *skb)
  1432. {
  1433. return skb->len - NAPI_GRO_CB(skb)->data_offset;
  1434. }
  1435. static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
  1436. {
  1437. NAPI_GRO_CB(skb)->data_offset += len;
  1438. }
  1439. static inline void *skb_gro_header_fast(struct sk_buff *skb,
  1440. unsigned int offset)
  1441. {
  1442. return NAPI_GRO_CB(skb)->frag0 + offset;
  1443. }
  1444. static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
  1445. {
  1446. return NAPI_GRO_CB(skb)->frag0_len < hlen;
  1447. }
  1448. static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
  1449. unsigned int offset)
  1450. {
  1451. NAPI_GRO_CB(skb)->frag0 = NULL;
  1452. NAPI_GRO_CB(skb)->frag0_len = 0;
  1453. return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
  1454. }
  1455. static inline void *skb_gro_mac_header(struct sk_buff *skb)
  1456. {
  1457. return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
  1458. }
  1459. static inline void *skb_gro_network_header(struct sk_buff *skb)
  1460. {
  1461. return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
  1462. skb_network_offset(skb);
  1463. }
  1464. static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
  1465. unsigned short type,
  1466. const void *daddr, const void *saddr,
  1467. unsigned len)
  1468. {
  1469. if (!dev->header_ops || !dev->header_ops->create)
  1470. return 0;
  1471. return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
  1472. }
  1473. static inline int dev_parse_header(const struct sk_buff *skb,
  1474. unsigned char *haddr)
  1475. {
  1476. const struct net_device *dev = skb->dev;
  1477. if (!dev->header_ops || !dev->header_ops->parse)
  1478. return 0;
  1479. return dev->header_ops->parse(skb, haddr);
  1480. }
  1481. typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  1482. extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
  1483. static inline int unregister_gifconf(unsigned int family)
  1484. {
  1485. return register_gifconf(family, NULL);
  1486. }
  1487. /*
  1488. * Incoming packets are placed on per-cpu queues
  1489. */
  1490. struct softnet_data {
  1491. struct Qdisc *output_queue;
  1492. struct Qdisc **output_queue_tailp;
  1493. struct list_head poll_list;
  1494. struct sk_buff *completion_queue;
  1495. struct sk_buff_head process_queue;
  1496. /* stats */
  1497. unsigned int processed;
  1498. unsigned int time_squeeze;
  1499. unsigned int cpu_collision;
  1500. unsigned int received_rps;
  1501. #ifdef CONFIG_RPS
  1502. struct softnet_data *rps_ipi_list;
  1503. /* Elements below can be accessed between CPUs for RPS */
  1504. struct call_single_data csd ____cacheline_aligned_in_smp;
  1505. struct softnet_data *rps_ipi_next;
  1506. unsigned int cpu;
  1507. unsigned int input_queue_head;
  1508. unsigned int input_queue_tail;
  1509. #endif
  1510. unsigned dropped;
  1511. struct sk_buff_head input_pkt_queue;
  1512. struct napi_struct backlog;
  1513. };
  1514. static inline void input_queue_head_incr(struct softnet_data *sd)
  1515. {
  1516. #ifdef CONFIG_RPS
  1517. sd->input_queue_head++;
  1518. #endif
  1519. }
  1520. static inline void input_queue_tail_incr_save(struct softnet_data *sd,
  1521. unsigned int *qtail)
  1522. {
  1523. #ifdef CONFIG_RPS
  1524. *qtail = ++sd->input_queue_tail;
  1525. #endif
  1526. }
  1527. DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
  1528. extern void __netif_schedule(struct Qdisc *q);
  1529. static inline void netif_schedule_queue(struct netdev_queue *txq)
  1530. {
  1531. if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
  1532. __netif_schedule(txq->qdisc);
  1533. }
  1534. static inline void netif_tx_schedule_all(struct net_device *dev)
  1535. {
  1536. unsigned int i;
  1537. for (i = 0; i < dev->num_tx_queues; i++)
  1538. netif_schedule_queue(netdev_get_tx_queue(dev, i));
  1539. }
  1540. static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
  1541. {
  1542. clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  1543. }
  1544. /**
  1545. * netif_start_queue - allow transmit
  1546. * @dev: network device
  1547. *
  1548. * Allow upper layers to call the device hard_start_xmit routine.
  1549. */
  1550. static inline void netif_start_queue(struct net_device *dev)
  1551. {
  1552. netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
  1553. }
  1554. static inline void netif_tx_start_all_queues(struct net_device *dev)
  1555. {
  1556. unsigned int i;
  1557. for (i = 0; i < dev->num_tx_queues; i++) {
  1558. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1559. netif_tx_start_queue(txq);
  1560. }
  1561. }
  1562. static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  1563. {
  1564. #ifdef CONFIG_NETPOLL_TRAP
  1565. if (netpoll_trap()) {
  1566. netif_tx_start_queue(dev_queue);
  1567. return;
  1568. }
  1569. #endif
  1570. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
  1571. __netif_schedule(dev_queue->qdisc);
  1572. }
  1573. /**
  1574. * netif_wake_queue - restart transmit
  1575. * @dev: network device
  1576. *
  1577. * Allow upper layers to call the device hard_start_xmit routine.
  1578. * Used for flow control when transmit resources are available.
  1579. */
  1580. static inline void netif_wake_queue(struct net_device *dev)
  1581. {
  1582. netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
  1583. }
  1584. static inline void netif_tx_wake_all_queues(struct net_device *dev)
  1585. {
  1586. unsigned int i;
  1587. for (i = 0; i < dev->num_tx_queues; i++) {
  1588. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1589. netif_tx_wake_queue(txq);
  1590. }
  1591. }
  1592. static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
  1593. {
  1594. if (WARN_ON(!dev_queue)) {
  1595. pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
  1596. return;
  1597. }
  1598. set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  1599. }
  1600. /**
  1601. * netif_stop_queue - stop transmitted packets
  1602. * @dev: network device
  1603. *
  1604. * Stop upper layers calling the device hard_start_xmit routine.
  1605. * Used for flow control when transmit resources are unavailable.
  1606. */
  1607. static inline void netif_stop_queue(struct net_device *dev)
  1608. {
  1609. netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
  1610. }
  1611. static inline void netif_tx_stop_all_queues(struct net_device *dev)
  1612. {
  1613. unsigned int i;
  1614. for (i = 0; i < dev->num_tx_queues; i++) {
  1615. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1616. netif_tx_stop_queue(txq);
  1617. }
  1618. }
  1619. static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  1620. {
  1621. return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  1622. }
  1623. /**
  1624. * netif_queue_stopped - test if transmit queue is flowblocked
  1625. * @dev: network device
  1626. *
  1627. * Test if transmit queue on device is currently unable to send.
  1628. */
  1629. static inline int netif_queue_stopped(const struct net_device *dev)
  1630. {
  1631. return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
  1632. }
  1633. static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
  1634. {
  1635. return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
  1636. }
  1637. /**
  1638. * netif_running - test if up
  1639. * @dev: network device
  1640. *
  1641. * Test if the device has been brought up.
  1642. */
  1643. static inline int netif_running(const struct net_device *dev)
  1644. {
  1645. return test_bit(__LINK_STATE_START, &dev->state);
  1646. }
  1647. /*
  1648. * Routines to manage the subqueues on a device. We only need start
  1649. * stop, and a check if it's stopped. All other device management is
  1650. * done at the overall netdevice level.
  1651. * Also test the device if we're multiqueue.
  1652. */
  1653. /**
  1654. * netif_start_subqueue - allow sending packets on subqueue
  1655. * @dev: network device
  1656. * @queue_index: sub queue index
  1657. *
  1658. * Start individual transmit queue of a device with multiple transmit queues.
  1659. */
  1660. static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  1661. {
  1662. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1663. netif_tx_start_queue(txq);
  1664. }
  1665. /**
  1666. * netif_stop_subqueue - stop sending packets on subqueue
  1667. * @dev: network device
  1668. * @queue_index: sub queue index
  1669. *
  1670. * Stop individual transmit queue of a device with multiple transmit queues.
  1671. */
  1672. static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  1673. {
  1674. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1675. #ifdef CONFIG_NETPOLL_TRAP
  1676. if (netpoll_trap())
  1677. return;
  1678. #endif
  1679. netif_tx_stop_queue(txq);
  1680. }
  1681. /**
  1682. * netif_subqueue_stopped - test status of subqueue
  1683. * @dev: network device
  1684. * @queue_index: sub queue index
  1685. *
  1686. * Check individual transmit queue of a device with multiple transmit queues.
  1687. */
  1688. static inline int __netif_subqueue_stopped(const struct net_device *dev,
  1689. u16 queue_index)
  1690. {
  1691. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1692. return netif_tx_queue_stopped(txq);
  1693. }
  1694. static inline int netif_subqueue_stopped(const struct net_device *dev,
  1695. struct sk_buff *skb)
  1696. {
  1697. return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
  1698. }
  1699. /**
  1700. * netif_wake_subqueue - allow sending packets on subqueue
  1701. * @dev: network device
  1702. * @queue_index: sub queue index
  1703. *
  1704. * Resume individual transmit queue of a device with multiple transmit queues.
  1705. */
  1706. static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  1707. {
  1708. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1709. #ifdef CONFIG_NETPOLL_TRAP
  1710. if (netpoll_trap())
  1711. return;
  1712. #endif
  1713. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
  1714. __netif_schedule(txq->qdisc);
  1715. }
  1716. /*
  1717. * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
  1718. * as a distribution range limit for the returned value.
  1719. */
  1720. static inline u16 skb_tx_hash(const struct net_device *dev,
  1721. const struct sk_buff *skb)
  1722. {
  1723. return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
  1724. }
  1725. /**
  1726. * netif_is_multiqueue - test if device has multiple transmit queues
  1727. * @dev: network device
  1728. *
  1729. * Check if device has multiple transmit queues
  1730. */
  1731. static inline int netif_is_multiqueue(const struct net_device *dev)
  1732. {
  1733. return dev->num_tx_queues > 1;
  1734. }
  1735. extern int netif_set_real_num_tx_queues(struct net_device *dev,
  1736. unsigned int txq);
  1737. #ifdef CONFIG_RPS
  1738. extern int netif_set_real_num_rx_queues(struct net_device *dev,
  1739. unsigned int rxq);
  1740. #else
  1741. static inline int netif_set_real_num_rx_queues(struct net_device *dev,
  1742. unsigned int rxq)
  1743. {
  1744. return 0;
  1745. }
  1746. #endif
  1747. static inline int netif_copy_real_num_queues(struct net_device *to_dev,
  1748. const struct net_device *from_dev)
  1749. {
  1750. netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
  1751. #ifdef CONFIG_RPS
  1752. return netif_set_real_num_rx_queues(to_dev,
  1753. from_dev->real_num_rx_queues);
  1754. #else
  1755. return 0;
  1756. #endif
  1757. }
  1758. /* Use this variant when it is known for sure that it
  1759. * is executing from hardware interrupt context or with hardware interrupts
  1760. * disabled.
  1761. */
  1762. extern void dev_kfree_skb_irq(struct sk_buff *skb);
  1763. /* Use this variant in places where it could be invoked
  1764. * from either hardware interrupt or other context, with hardware interrupts
  1765. * either disabled or enabled.
  1766. */
  1767. extern void dev_kfree_skb_any(struct sk_buff *skb);
  1768. extern int netif_rx(struct sk_buff *skb);
  1769. extern int netif_rx_ni(struct sk_buff *skb);
  1770. extern int netif_receive_skb(struct sk_buff *skb);
  1771. extern gro_result_t dev_gro_receive(struct napi_struct *napi,
  1772. struct sk_buff *skb);
  1773. extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
  1774. extern gro_result_t napi_gro_receive(struct napi_struct *napi,
  1775. struct sk_buff *skb);
  1776. extern void napi_gro_flush(struct napi_struct *napi);
  1777. extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
  1778. extern gro_result_t napi_frags_finish(struct napi_struct *napi,
  1779. struct sk_buff *skb,
  1780. gro_result_t ret);
  1781. extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
  1782. extern gro_result_t napi_gro_frags(struct napi_struct *napi);
  1783. static inline void napi_free_frags(struct napi_struct *napi)
  1784. {
  1785. kfree_skb(napi->skb);
  1786. napi->skb = NULL;
  1787. }
  1788. extern int netdev_rx_handler_register(struct net_device *dev,
  1789. rx_handler_func_t *rx_handler,
  1790. void *rx_handler_data);
  1791. extern void netdev_rx_handler_unregister(struct net_device *dev);
  1792. extern int dev_valid_name(const char *name);
  1793. extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
  1794. extern int dev_ethtool(struct net *net, struct ifreq *);
  1795. extern unsigned dev_get_flags(const struct net_device *);
  1796. extern int __dev_change_flags(struct net_device *, unsigned int flags);
  1797. extern int dev_change_flags(struct net_device *, unsigned);
  1798. extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
  1799. extern int dev_change_name(struct net_device *, const char *);
  1800. extern int dev_set_alias(struct net_device *, const char *, size_t);
  1801. extern int dev_change_net_namespace(struct net_device *,
  1802. struct net *, const char *);
  1803. extern int dev_set_mtu(struct net_device *, int);
  1804. extern void dev_set_group(struct net_device *, int);
  1805. extern int dev_set_mac_address(struct net_device *,
  1806. struct sockaddr *);
  1807. extern int dev_hard_start_xmit(struct sk_buff *skb,
  1808. struct net_device *dev,
  1809. struct netdev_queue *txq);
  1810. extern int dev_forward_skb(struct net_device *dev,
  1811. struct sk_buff *skb);
  1812. extern int netdev_budget;
  1813. /* Called by rtnetlink.c:rtnl_unlock() */
  1814. extern void netdev_run_todo(void);
  1815. /**
  1816. * dev_put - release reference to device
  1817. * @dev: network device
  1818. *
  1819. * Release reference to device to allow it to be freed.
  1820. */
  1821. static inline void dev_put(struct net_device *dev)
  1822. {
  1823. irqsafe_cpu_dec(*dev->pcpu_refcnt);
  1824. }
  1825. /**
  1826. * dev_hold - get reference to device
  1827. * @dev: network device
  1828. *
  1829. * Hold reference to device to keep it from being freed.
  1830. */
  1831. static inline void dev_hold(struct net_device *dev)
  1832. {
  1833. irqsafe_cpu_inc(*dev->pcpu_refcnt);
  1834. }
  1835. /* Carrier loss detection, dial on demand. The functions netif_carrier_on
  1836. * and _off may be called from IRQ context, but it is caller
  1837. * who is responsible for serialization of these calls.
  1838. *
  1839. * The name carrier is inappropriate, these functions should really be
  1840. * called netif_lowerlayer_*() because they represent the state of any
  1841. * kind of lower layer not just hardware media.
  1842. */
  1843. extern void linkwatch_fire_event(struct net_device *dev);
  1844. extern void linkwatch_forget_dev(struct net_device *dev);
  1845. /**
  1846. * netif_carrier_ok - test if carrier present
  1847. * @dev: network device
  1848. *
  1849. * Check if carrier is present on device
  1850. */
  1851. static inline int netif_carrier_ok(const struct net_device *dev)
  1852. {
  1853. return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
  1854. }
  1855. extern unsigned long dev_trans_start(struct net_device *dev);
  1856. extern void __netdev_watchdog_up(struct net_device *dev);
  1857. extern void netif_carrier_on(struct net_device *dev);
  1858. extern void netif_carrier_off(struct net_device *dev);
  1859. extern void netif_notify_peers(struct net_device *dev);
  1860. /**
  1861. * netif_dormant_on - mark device as dormant.
  1862. * @dev: network device
  1863. *
  1864. * Mark device as dormant (as per RFC2863).
  1865. *
  1866. * The dormant state indicates that the relevant interface is not
  1867. * actually in a condition to pass packets (i.e., it is not 'up') but is
  1868. * in a "pending" state, waiting for some external event. For "on-
  1869. * demand" interfaces, this new state identifies the situation where the
  1870. * interface is waiting for events to place it in the up state.
  1871. *
  1872. */
  1873. static inline void netif_dormant_on(struct net_device *dev)
  1874. {
  1875. if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
  1876. linkwatch_fire_event(dev);
  1877. }
  1878. /**
  1879. * netif_dormant_off - set device as not dormant.
  1880. * @dev: network device
  1881. *
  1882. * Device is not in dormant state.
  1883. */
  1884. static inline void netif_dormant_off(struct net_device *dev)
  1885. {
  1886. if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
  1887. linkwatch_fire_event(dev);
  1888. }
  1889. /**
  1890. * netif_dormant - test if carrier present
  1891. * @dev: network device
  1892. *
  1893. * Check if carrier is present on device
  1894. */
  1895. static inline int netif_dormant(const struct net_device *dev)
  1896. {
  1897. return test_bit(__LINK_STATE_DORMANT, &dev->state);
  1898. }
  1899. /**
  1900. * netif_oper_up - test if device is operational
  1901. * @dev: network device
  1902. *
  1903. * Check if carrier is operational
  1904. */
  1905. static inline int netif_oper_up(const struct net_device *dev)
  1906. {
  1907. return (dev->operstate == IF_OPER_UP ||
  1908. dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
  1909. }
  1910. /**
  1911. * netif_device_present - is device available or removed
  1912. * @dev: network device
  1913. *
  1914. * Check if device has not been removed from system.
  1915. */
  1916. static inline int netif_device_present(struct net_device *dev)
  1917. {
  1918. return test_bit(__LINK_STATE_PRESENT, &dev->state);
  1919. }
  1920. extern void netif_device_detach(struct net_device *dev);
  1921. extern void netif_device_attach(struct net_device *dev);
  1922. /*
  1923. * Network interface message level settings
  1924. */
  1925. enum {
  1926. NETIF_MSG_DRV = 0x0001,
  1927. NETIF_MSG_PROBE = 0x0002,
  1928. NETIF_MSG_LINK = 0x0004,
  1929. NETIF_MSG_TIMER = 0x0008,
  1930. NETIF_MSG_IFDOWN = 0x0010,
  1931. NETIF_MSG_IFUP = 0x0020,
  1932. NETIF_MSG_RX_ERR = 0x0040,
  1933. NETIF_MSG_TX_ERR = 0x0080,
  1934. NETIF_MSG_TX_QUEUED = 0x0100,
  1935. NETIF_MSG_INTR = 0x0200,
  1936. NETIF_MSG_TX_DONE = 0x0400,
  1937. NETIF_MSG_RX_STATUS = 0x0800,
  1938. NETIF_MSG_PKTDATA = 0x1000,
  1939. NETIF_MSG_HW = 0x2000,
  1940. NETIF_MSG_WOL = 0x4000,
  1941. };
  1942. #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
  1943. #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
  1944. #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
  1945. #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
  1946. #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
  1947. #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
  1948. #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
  1949. #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
  1950. #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
  1951. #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
  1952. #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
  1953. #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
  1954. #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
  1955. #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
  1956. #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
  1957. static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
  1958. {
  1959. /* use default */
  1960. if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
  1961. return default_msg_enable_bits;
  1962. if (debug_value == 0) /* no output */
  1963. return 0;
  1964. /* set low N bits */
  1965. return (1 << debug_value) - 1;
  1966. }
  1967. static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
  1968. {
  1969. spin_lock(&txq->_xmit_lock);
  1970. txq->xmit_lock_owner = cpu;
  1971. }
  1972. static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  1973. {
  1974. spin_lock_bh(&txq->_xmit_lock);
  1975. txq->xmit_lock_owner = smp_processor_id();
  1976. }
  1977. static inline int __netif_tx_trylock(struct netdev_queue *txq)
  1978. {
  1979. int ok = spin_trylock(&txq->_xmit_lock);
  1980. if (likely(ok))
  1981. txq->xmit_lock_owner = smp_processor_id();
  1982. return ok;
  1983. }
  1984. static inline void __netif_tx_unlock(struct netdev_queue *txq)
  1985. {
  1986. txq->xmit_lock_owner = -1;
  1987. spin_unlock(&txq->_xmit_lock);
  1988. }
  1989. static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
  1990. {
  1991. txq->xmit_lock_owner = -1;
  1992. spin_unlock_bh(&txq->_xmit_lock);
  1993. }
  1994. static inline void txq_trans_update(struct netdev_queue *txq)
  1995. {
  1996. if (txq->xmit_lock_owner != -1)
  1997. txq->trans_start = jiffies;
  1998. }
  1999. /**
  2000. * netif_tx_lock - grab network device transmit lock
  2001. * @dev: network device
  2002. *
  2003. * Get network device transmit lock
  2004. */
  2005. static inline void netif_tx_lock(struct net_device *dev)
  2006. {
  2007. unsigned int i;
  2008. int cpu;
  2009. spin_lock(&dev->tx_global_lock);
  2010. cpu = smp_processor_id();
  2011. for (i = 0; i < dev->num_tx_queues; i++) {
  2012. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2013. /* We are the only thread of execution doing a
  2014. * freeze, but we have to grab the _xmit_lock in
  2015. * order to synchronize with threads which are in
  2016. * the ->hard_start_xmit() handler and already
  2017. * checked the frozen bit.
  2018. */
  2019. __netif_tx_lock(txq, cpu);
  2020. set_bit(__QUEUE_STATE_FROZEN, &txq->state);
  2021. __netif_tx_unlock(txq);
  2022. }
  2023. }
  2024. static inline void netif_tx_lock_bh(struct net_device *dev)
  2025. {
  2026. local_bh_disable();
  2027. netif_tx_lock(dev);
  2028. }
  2029. static inline void netif_tx_unlock(struct net_device *dev)
  2030. {
  2031. unsigned int i;
  2032. for (i = 0; i < dev->num_tx_queues; i++) {
  2033. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2034. /* No need to grab the _xmit_lock here. If the
  2035. * queue is not stopped for another reason, we
  2036. * force a schedule.
  2037. */
  2038. clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
  2039. netif_schedule_queue(txq);
  2040. }
  2041. spin_unlock(&dev->tx_global_lock);
  2042. }
  2043. static inline void netif_tx_unlock_bh(struct net_device *dev)
  2044. {
  2045. netif_tx_unlock(dev);
  2046. local_bh_enable();
  2047. }
  2048. #define HARD_TX_LOCK(dev, txq, cpu) { \
  2049. if ((dev->features & NETIF_F_LLTX) == 0) { \
  2050. __netif_tx_lock(txq, cpu); \
  2051. } \
  2052. }
  2053. #define HARD_TX_UNLOCK(dev, txq) { \
  2054. if ((dev->features & NETIF_F_LLTX) == 0) { \
  2055. __netif_tx_unlock(txq); \
  2056. } \
  2057. }
  2058. static inline void netif_tx_disable(struct net_device *dev)
  2059. {
  2060. unsigned int i;
  2061. int cpu;
  2062. local_bh_disable();
  2063. cpu = smp_processor_id();
  2064. for (i = 0; i < dev->num_tx_queues; i++) {
  2065. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  2066. __netif_tx_lock(txq, cpu);
  2067. netif_tx_stop_queue(txq);
  2068. __netif_tx_unlock(txq);
  2069. }
  2070. local_bh_enable();
  2071. }
  2072. static inline void netif_addr_lock(struct net_device *dev)
  2073. {
  2074. spin_lock(&dev->addr_list_lock);
  2075. }
  2076. static inline void netif_addr_lock_bh(struct net_device *dev)
  2077. {
  2078. spin_lock_bh(&dev->addr_list_lock);
  2079. }
  2080. static inline void netif_addr_unlock(struct net_device *dev)
  2081. {
  2082. spin_unlock(&dev->addr_list_lock);
  2083. }
  2084. static inline void netif_addr_unlock_bh(struct net_device *dev)
  2085. {
  2086. spin_unlock_bh(&dev->addr_list_lock);
  2087. }
  2088. /*
  2089. * dev_addrs walker. Should be used only for read access. Call with
  2090. * rcu_read_lock held.
  2091. */
  2092. #define for_each_dev_addr(dev, ha) \
  2093. list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
  2094. /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  2095. extern void ether_setup(struct net_device *dev);
  2096. /* Support for loadable net-drivers */
  2097. extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
  2098. void (*setup)(struct net_device *),
  2099. unsigned int txqs, unsigned int rxqs);
  2100. #define alloc_netdev(sizeof_priv, name, setup) \
  2101. alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
  2102. #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
  2103. alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
  2104. extern int register_netdev(struct net_device *dev);
  2105. extern void unregister_netdev(struct net_device *dev);
  2106. /* General hardware address lists handling functions */
  2107. extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
  2108. struct netdev_hw_addr_list *from_list,
  2109. int addr_len, unsigned char addr_type);
  2110. extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
  2111. struct netdev_hw_addr_list *from_list,
  2112. int addr_len, unsigned char addr_type);
  2113. extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
  2114. struct netdev_hw_addr_list *from_list,
  2115. int addr_len);
  2116. extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
  2117. struct netdev_hw_addr_list *from_list,
  2118. int addr_len);
  2119. extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
  2120. extern void __hw_addr_init(struct netdev_hw_addr_list *list);
  2121. /* Functions used for device addresses handling */
  2122. extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
  2123. unsigned char addr_type);
  2124. extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
  2125. unsigned char addr_type);
  2126. extern int dev_addr_add_multiple(struct net_device *to_dev,
  2127. struct net_device *from_dev,
  2128. unsigned char addr_type);
  2129. extern int dev_addr_del_multiple(struct net_device *to_dev,
  2130. struct net_device *from_dev,
  2131. unsigned char addr_type);
  2132. extern void dev_addr_flush(struct net_device *dev);
  2133. extern int dev_addr_init(struct net_device *dev);
  2134. /* Functions used for unicast addresses handling */
  2135. extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
  2136. extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
  2137. extern int dev_uc_sync(struct net_device *to, struct net_device *from);
  2138. extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
  2139. extern void dev_uc_flush(struct net_device *dev);
  2140. extern void dev_uc_init(struct net_device *dev);
  2141. /* Functions used for multicast addresses handling */
  2142. extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
  2143. extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
  2144. extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
  2145. extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
  2146. extern int dev_mc_sync(struct net_device *to, struct net_device *from);
  2147. extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
  2148. extern void dev_mc_flush(struct net_device *dev);
  2149. extern void dev_mc_init(struct net_device *dev);
  2150. /* Functions used for secondary unicast and multicast support */
  2151. extern void dev_set_rx_mode(struct net_device *dev);
  2152. extern void __dev_set_rx_mode(struct net_device *dev);
  2153. extern int dev_set_promiscuity(struct net_device *dev, int inc);
  2154. extern int dev_set_allmulti(struct net_device *dev, int inc);
  2155. extern void netdev_state_change(struct net_device *dev);
  2156. extern int netdev_bonding_change(struct net_device *dev,
  2157. unsigned long event);
  2158. extern void netdev_features_change(struct net_device *dev);
  2159. /* Load a device via the kmod */
  2160. extern void dev_load(struct net *net, const char *name);
  2161. extern void dev_mcast_init(void);
  2162. extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  2163. struct rtnl_link_stats64 *storage);
  2164. extern int netdev_max_backlog;
  2165. extern int netdev_tstamp_prequeue;
  2166. extern int weight_p;
  2167. extern int bpf_jit_enable;
  2168. extern int netdev_set_master(struct net_device *dev, struct net_device *master);
  2169. extern int netdev_set_bond_master(struct net_device *dev,
  2170. struct net_device *master);
  2171. extern int skb_checksum_help(struct sk_buff *skb);
  2172. extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
  2173. #ifdef CONFIG_BUG
  2174. extern void netdev_rx_csum_fault(struct net_device *dev);
  2175. #else
  2176. static inline void netdev_rx_csum_fault(struct net_device *dev)
  2177. {
  2178. }
  2179. #endif
  2180. /* rx skb timestamps */
  2181. extern void net_enable_timestamp(void);
  2182. extern void net_disable_timestamp(void);
  2183. #ifdef CONFIG_PROC_FS
  2184. extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
  2185. extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
  2186. extern void dev_seq_stop(struct seq_file *seq, void *v);
  2187. #endif
  2188. extern int netdev_class_create_file(struct class_attribute *class_attr);
  2189. extern void netdev_class_remove_file(struct class_attribute *class_attr);
  2190. extern struct kobj_ns_type_operations net_ns_type_operations;
  2191. extern const char *netdev_drivername(const struct net_device *dev);
  2192. extern void linkwatch_run_queue(void);
  2193. static inline u32 netdev_get_wanted_features(struct net_device *dev)
  2194. {
  2195. return (dev->features & ~dev->hw_features) | dev->wanted_features;
  2196. }
  2197. u32 netdev_increment_features(u32 all, u32 one, u32 mask);
  2198. int __netdev_update_features(struct net_device *dev);
  2199. void netdev_update_features(struct net_device *dev);
  2200. void netdev_change_features(struct net_device *dev);
  2201. void netif_stacked_transfer_operstate(const struct net_device *rootdev,
  2202. struct net_device *dev);
  2203. u32 netif_skb_features(struct sk_buff *skb);
  2204. static inline int net_gso_ok(u32 features, int gso_type)
  2205. {
  2206. int feature = gso_type << NETIF_F_GSO_SHIFT;
  2207. return (features & feature) == feature;
  2208. }
  2209. static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
  2210. {
  2211. return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
  2212. (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
  2213. }
  2214. static inline int netif_needs_gso(struct sk_buff *skb, int features)
  2215. {
  2216. return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
  2217. unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
  2218. }
  2219. static inline void netif_set_gso_max_size(struct net_device *dev,
  2220. unsigned int size)
  2221. {
  2222. dev->gso_max_size = size;
  2223. }
  2224. static inline int netif_is_bond_slave(struct net_device *dev)
  2225. {
  2226. return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
  2227. }
  2228. extern struct pernet_operations __net_initdata loopback_net_ops;
  2229. int dev_ethtool_get_settings(struct net_device *dev,
  2230. struct ethtool_cmd *cmd);
  2231. static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
  2232. {
  2233. if (dev->features & NETIF_F_RXCSUM)
  2234. return 1;
  2235. if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
  2236. return 0;
  2237. return dev->ethtool_ops->get_rx_csum(dev);
  2238. }
  2239. static inline u32 dev_ethtool_get_flags(struct net_device *dev)
  2240. {
  2241. if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
  2242. return 0;
  2243. return dev->ethtool_ops->get_flags(dev);
  2244. }
  2245. /* Logging, debugging and troubleshooting/diagnostic helpers. */
  2246. /* netdev_printk helpers, similar to dev_printk */
  2247. static inline const char *netdev_name(const struct net_device *dev)
  2248. {
  2249. if (dev->reg_state != NETREG_REGISTERED)
  2250. return "(unregistered net_device)";
  2251. return dev->name;
  2252. }
  2253. extern int netdev_printk(const char *level, const struct net_device *dev,
  2254. const char *format, ...)
  2255. __attribute__ ((format (printf, 3, 4)));
  2256. extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
  2257. __attribute__ ((format (printf, 2, 3)));
  2258. extern int netdev_alert(const struct net_device *dev, const char *format, ...)
  2259. __attribute__ ((format (printf, 2, 3)));
  2260. extern int netdev_crit(const struct net_device *dev, const char *format, ...)
  2261. __attribute__ ((format (printf, 2, 3)));
  2262. extern int netdev_err(const struct net_device *dev, const char *format, ...)
  2263. __attribute__ ((format (printf, 2, 3)));
  2264. extern int netdev_warn(const struct net_device *dev, const char *format, ...)
  2265. __attribute__ ((format (printf, 2, 3)));
  2266. extern int netdev_notice(const struct net_device *dev, const char *format, ...)
  2267. __attribute__ ((format (printf, 2, 3)));
  2268. extern int netdev_info(const struct net_device *dev, const char *format, ...)
  2269. __attribute__ ((format (printf, 2, 3)));
  2270. #define MODULE_ALIAS_NETDEV(device) \
  2271. MODULE_ALIAS("netdev-" device)
  2272. #if defined(DEBUG)
  2273. #define netdev_dbg(__dev, format, args...) \
  2274. netdev_printk(KERN_DEBUG, __dev, format, ##args)
  2275. #elif defined(CONFIG_DYNAMIC_DEBUG)
  2276. #define netdev_dbg(__dev, format, args...) \
  2277. do { \
  2278. dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
  2279. netdev_name(__dev), ##args); \
  2280. } while (0)
  2281. #else
  2282. #define netdev_dbg(__dev, format, args...) \
  2283. ({ \
  2284. if (0) \
  2285. netdev_printk(KERN_DEBUG, __dev, format, ##args); \
  2286. 0; \
  2287. })
  2288. #endif
  2289. #if defined(VERBOSE_DEBUG)
  2290. #define netdev_vdbg netdev_dbg
  2291. #else
  2292. #define netdev_vdbg(dev, format, args...) \
  2293. ({ \
  2294. if (0) \
  2295. netdev_printk(KERN_DEBUG, dev, format, ##args); \
  2296. 0; \
  2297. })
  2298. #endif
  2299. /*
  2300. * netdev_WARN() acts like dev_printk(), but with the key difference
  2301. * of using a WARN/WARN_ON to get the message out, including the
  2302. * file/line information and a backtrace.
  2303. */
  2304. #define netdev_WARN(dev, format, args...) \
  2305. WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
  2306. /* netif printk helpers, similar to netdev_printk */
  2307. #define netif_printk(priv, type, level, dev, fmt, args...) \
  2308. do { \
  2309. if (netif_msg_##type(priv)) \
  2310. netdev_printk(level, (dev), fmt, ##args); \
  2311. } while (0)
  2312. #define netif_level(level, priv, type, dev, fmt, args...) \
  2313. do { \
  2314. if (netif_msg_##type(priv)) \
  2315. netdev_##level(dev, fmt, ##args); \
  2316. } while (0)
  2317. #define netif_emerg(priv, type, dev, fmt, args...) \
  2318. netif_level(emerg, priv, type, dev, fmt, ##args)
  2319. #define netif_alert(priv, type, dev, fmt, args...) \
  2320. netif_level(alert, priv, type, dev, fmt, ##args)
  2321. #define netif_crit(priv, type, dev, fmt, args...) \
  2322. netif_level(crit, priv, type, dev, fmt, ##args)
  2323. #define netif_err(priv, type, dev, fmt, args...) \
  2324. netif_level(err, priv, type, dev, fmt, ##args)
  2325. #define netif_warn(priv, type, dev, fmt, args...) \
  2326. netif_level(warn, priv, type, dev, fmt, ##args)
  2327. #define netif_notice(priv, type, dev, fmt, args...) \
  2328. netif_level(notice, priv, type, dev, fmt, ##args)
  2329. #define netif_info(priv, type, dev, fmt, args...) \
  2330. netif_level(info, priv, type, dev, fmt, ##args)
  2331. #if defined(DEBUG)
  2332. #define netif_dbg(priv, type, dev, format, args...) \
  2333. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
  2334. #elif defined(CONFIG_DYNAMIC_DEBUG)
  2335. #define netif_dbg(priv, type, netdev, format, args...) \
  2336. do { \
  2337. if (netif_msg_##type(priv)) \
  2338. dynamic_dev_dbg((netdev)->dev.parent, \
  2339. "%s: " format, \
  2340. netdev_name(netdev), ##args); \
  2341. } while (0)
  2342. #else
  2343. #define netif_dbg(priv, type, dev, format, args...) \
  2344. ({ \
  2345. if (0) \
  2346. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
  2347. 0; \
  2348. })
  2349. #endif
  2350. #if defined(VERBOSE_DEBUG)
  2351. #define netif_vdbg netif_dbg
  2352. #else
  2353. #define netif_vdbg(priv, type, dev, format, args...) \
  2354. ({ \
  2355. if (0) \
  2356. netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
  2357. 0; \
  2358. })
  2359. #endif
  2360. #endif /* __KERNEL__ */
  2361. #endif /* _LINUX_NETDEVICE_H */