netdevice.h

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Interfaces handler.
 *
 * Version:     @(#)dev.h       1.0.10  08/12/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCBNL
#include <net/dcbnl.h>
#endif

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;

/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV       /* feature macro: alloc_xxxdev
                                   functions are available. */
#define HAVE_FREE_NETDEV        /* free_netdev() */
#define HAVE_NETDEV_PRIV        /* netdev_priv() */

#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1       /* skb dropped                  */
#define NET_XMIT_CN             2       /* congestion notification      */
#define NET_XMIT_POLICED        3       /* skb is shot by police        */
#define NET_XMIT_MASK           0xFFFF  /* qdisc flags in net/sch_generic.h */

/* Backlog congestion levels */
#define NET_RX_SUCCESS          0       /* keep 'em coming, baby */
#define NET_RX_DROP             1       /* packet dropped */
#define NET_RX_CN_LOW           2       /* storm alert, just in case */
#define NET_RX_CN_MOD           3       /* Storm on its way! */
#define NET_RX_CN_HIGH          4       /* The storm is here */
#define NET_RX_BAD              5       /* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)

#endif

#define MAX_ADDR_LEN    32      /* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK            0       /* driver took care of packet */
#define NETDEV_TX_BUSY          1       /* driver tx path was busy */
#define NETDEV_TX_LOCKED        -1      /* driver tx lock was already taken */

#ifdef __KERNEL__

/*
 *      Compute the worst case header length according to the protocols
 *      used.
 */
#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /* __KERNEL__ */

/*
 *      Network device statistics. Akin to the 2.0 ether stats but
 *      with byte counters.
 */
struct net_device_stats
{
        unsigned long   rx_packets;             /* total packets received       */
        unsigned long   tx_packets;             /* total packets transmitted    */
        unsigned long   rx_bytes;               /* total bytes received         */
        unsigned long   tx_bytes;               /* total bytes transmitted      */
        unsigned long   rx_errors;              /* bad packets received         */
        unsigned long   tx_errors;              /* packet transmit problems     */
        unsigned long   rx_dropped;             /* no space in linux buffers    */
        unsigned long   tx_dropped;             /* no space available in linux  */
        unsigned long   multicast;              /* multicast packets received   */
        unsigned long   collisions;

        /* detailed rx_errors: */
        unsigned long   rx_length_errors;
        unsigned long   rx_over_errors;         /* receiver ring buff overflow  */
        unsigned long   rx_crc_errors;          /* recved pkt with crc error    */
        unsigned long   rx_frame_errors;        /* recv'd frame alignment error */
        unsigned long   rx_fifo_errors;         /* recv'r fifo overrun          */
        unsigned long   rx_missed_errors;       /* receiver missed packet       */

        /* detailed tx_errors */
        unsigned long   tx_aborted_errors;
        unsigned long   tx_carrier_errors;
        unsigned long   tx_fifo_errors;
        unsigned long   tx_heartbeat_errors;
        unsigned long   tx_window_errors;

        /* for cslip etc */
        unsigned long   rx_compressed;
        unsigned long   tx_compressed;
};

/* Media selection options. */
enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
        unsigned total;
        unsigned dropped;
        unsigned time_squeeze;
        unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
        struct dev_addr_list    *next;
        u8                      da_addr[MAX_ADDR_LEN];
        u8                      da_addrlen;
        u8                      da_synced;
        int                     da_users;
        int                     da_gusers;
};

/*
 *      We tag multicasts with these structures.
 */
#define dev_mc_list     dev_addr_list
#define dmi_addr        da_addr
#define dmi_addrlen     da_addrlen
#define dmi_users       da_users
#define dmi_gusers      da_gusers

struct hh_cache
{
        struct hh_cache *hh_next;       /* Next entry                  */
        atomic_t        hh_refcnt;      /* number of users             */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
        __be16          hh_type ____cacheline_aligned_in_smp;
                                        /* protocol identifier, e.g. ETH_P_IP
                                         * NOTE: For VLANs, this will be the
                                         * encapsulated type. --BLG
                                         */
        u16             hh_len;         /* length of header */
        int             (*hh_output)(struct sk_buff *skb);
        seqlock_t       hh_lock;

        /* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
        unsigned long   hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
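
/*
 * Illustrative sketch (not part of the original header): a typical way a
 * caller allocates an skb with aligned headroom for the link-layer header
 * using LL_RESERVED_SPACE().  The helper name my_alloc_packet() and the
 * payload length parameter are hypothetical.
 */
#if 0   /* example only */
static struct sk_buff *my_alloc_packet(struct net_device *dev, unsigned len)
{
        struct sk_buff *skb;

        /* Reserve the rounded-up hardware header space up front. */
        skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
        if (skb == NULL)
                return NULL;
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        return skb;
}
#endif
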
struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
                           unsigned short type, const void *daddr,
                           const void *saddr, unsigned len);
        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int     (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
        void    (*cache_update)(struct hh_cache *hh,
                                const struct net_device *dev,
                                const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
};

/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling, similar to tasklet but with weighting
 */
struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit.  This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head        poll_list;

        unsigned long           state;
        int                     weight;
        int                     (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t              poll_lock;
        int                     poll_owner;
        struct net_device       *dev;
        struct list_head        dev_list;
#endif
};

enum
{
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
        return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_schedule_prep - check if napi can be scheduled
 *      @n: napi context
 *
 * Test if the NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *      napi_schedule - schedule NAPI poll
 *      @n: napi context
 *
 * Schedule the NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return 1;
        }
        return 0;
}
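
/*
 * Illustrative sketch (not part of the original header): the usual NAPI
 * pattern in a driver interrupt handler -- mask the device's RX interrupts,
 * then defer the actual packet work to the poll routine.  The driver names
 * (my_irq, struct my_priv, my_disable_rx_irqs) are hypothetical.
 */
#if 0   /* example only */
static irqreturn_t my_irq(int irq, void *dev_id)
{
        struct my_priv *priv = dev_id;

        if (napi_schedule_prep(&priv->napi)) {
                my_disable_rx_irqs(priv);       /* stop further RX interrupts */
                __napi_schedule(&priv->napi);   /* my_poll() will run soon */
        }
        return IRQ_HANDLED;
}
#endif
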
/**
 *      napi_complete - NAPI processing complete
 *      @n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        list_del(&n->poll_list);
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
        unsigned long flags;

        local_irq_save(flags);
        __napi_complete(n);
        local_irq_restore(flags);
}
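
/*
 * Illustrative sketch (not part of the original header): a driver poll
 * routine that processes at most @budget packets and calls napi_complete()
 * once the ring is drained.  my_clean_rx() and my_enable_rx_irqs() are
 * hypothetical driver helpers.
 */
#if 0   /* example only */
static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done;

        work_done = my_clean_rx(priv, budget);
        if (work_done < budget) {
                /* Ring drained: stop polling and re-arm RX interrupts. */
                napi_complete(napi);
                my_enable_rx_irqs(priv);
        }
        return work_done;
}
#endif
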
/**
 *      napi_disable - prevent NAPI from scheduling
 *      @n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
        set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
        clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_enable - enable NAPI scheduling
 *      @n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *      napi_synchronize - wait until NAPI is not running
 *      @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif

enum netdev_queue_state_t
{
        __QUEUE_STATE_XOFF,
        __QUEUE_STATE_FROZEN,
};

struct netdev_queue {
        struct net_device       *dev;
        struct Qdisc            *qdisc;
        unsigned long           state;
        spinlock_t              _xmit_lock;
        int                     xmit_lock_owner;
        struct Qdisc            *qdisc_sleeping;
} ____cacheline_aligned_in_smp;

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when a device is unregistered or when
 *     registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when a network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when a network device transitions to the down
 *     state.
 *
 * int (*ndo_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev);
 *     Called when a packet needs to be transmitted.
 *     Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
 *     Required; cannot be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *     Called to decide which queue to use when the device supports
 *     multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *     This function is called to allow the device receiver to make
 *     changes to its configuration when multicast or promiscuous mode
 *     is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *     This function is called when the device changes its address list
 *     filtering.
 *
 * void (*ndo_set_multicast_list)(struct net_device *dev);
 *     This function is called when the multicast address list changes.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *     This function is called when the Media Access Control address
 *     needs to be changed. If this interface is not defined, the
 *     MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *     Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *     Called when a user requests an ioctl which can't be handled by
 *     the generic interface code. If not defined, ioctls return a
 *     not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *     Used to set network device bus interface parameters. This interface
 *     is retained for legacy reasons; new devices should use the bus
 *     interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *     Called when a user wants to change the Maximum Transfer Unit
 *     of a device. If not defined, any request to change MTU will
 *     return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *     Callback used when the transmitter has not made any progress
 *     for dev->watchdog ticks.
 *
 * struct net_device_stats* (*get_stats)(struct net_device *dev);
 *     Called when a user wants to get the network device usage
 *     statistics. If not defined, the counters in dev->stats will
 *     be used.
 *
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *     If the device supports VLAN receive acceleration
 *     (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 *     when the vlan groups for the device change. Note: grp is NULL
 *     if no vlan groups are being used.
 *
 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *     If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *     this function is called when a VLAN id is registered.
 *
 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *     If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 *     this function is called when a VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 */
struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
        void                    (*ndo_uninit)(struct net_device *dev);
        int                     (*ndo_open)(struct net_device *dev);
        int                     (*ndo_stop)(struct net_device *dev);
        int                     (*ndo_start_xmit) (struct sk_buff *skb,
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
                                                    struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
#define HAVE_SET_RX_MODE
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
        void                    (*ndo_set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
        int                     (*ndo_set_mac_address)(struct net_device *dev,
                                                       void *addr);
#define HAVE_VALIDATE_ADDR
        int                     (*ndo_validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
        int                     (*ndo_do_ioctl)(struct net_device *dev,
                                                struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
        int                     (*ndo_set_config)(struct net_device *dev,
                                                  struct ifmap *map);
#define HAVE_CHANGE_MTU
        int                     (*ndo_change_mtu)(struct net_device *dev,
                                                  int new_mtu);
        int                     (*ndo_neigh_setup)(struct net_device *dev,
                                                   struct neigh_parms *);
#define HAVE_TX_TIMEOUT
        void                    (*ndo_tx_timeout) (struct net_device *dev);

        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

        void                    (*ndo_vlan_rx_register)(struct net_device *dev,
                                                        struct vlan_group *grp);
        void                    (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                                       unsigned short vid);
        void                    (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                        unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
#define HAVE_NETDEV_POLL
        void                    (*ndo_poll_controller)(struct net_device *dev);
#endif
};
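
/*
 * Illustrative sketch (not part of the original header): how a driver
 * typically wires its methods into a net_device_ops table and assigns it
 * during probe.  The my_* names are hypothetical; eth_mac_addr,
 * eth_change_mtu and eth_validate_addr are assumed to be the generic
 * Ethernet helpers from <linux/etherdevice.h> of this kernel era.
 */
#if 0   /* example only */
static const struct net_device_ops my_netdev_ops = {
        .ndo_open               = my_open,
        .ndo_stop               = my_stop,
        .ndo_start_xmit         = my_start_xmit,        /* required */
        .ndo_set_multicast_list = my_set_rx_mode,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = my_tx_timeout,
};
/* in the probe routine: dev->netdev_ops = &my_netdev_ops; */
#endif
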
/*
 *      The DEVICE structure.
 *      Actually, this whole structure is a big mistake.  It mixes I/O
 *      data with strictly "high-level" data, and it has to know about
 *      almost every data structure used in the INET module.
 *
 *      FIXME: cleanup struct net_device such that network protocol info
 *      moves out.
 */

struct net_device
{
        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
         * of the interface.
         */
        char                    name[IFNAMSIZ];
        /* device name hash chain */
        struct hlist_node       name_hlist;
        /* snmp alias */
        char                    *ifalias;

        /*
         *      I/O specific fields
         *      FIXME: Merge these and struct ifmap into one
         */
        unsigned long           mem_end;        /* shared mem end       */
        unsigned long           mem_start;      /* shared mem start     */
        unsigned long           base_addr;      /* device I/O address   */
        unsigned int            irq;            /* device IRQ number    */

        /*
         *      Some hardware also needs these fields, but they are not
         *      part of the usual set specified in Space.c.
         */
        unsigned char           if_port;        /* Selectable AUI, TP,..*/
        unsigned char           dma;            /* DMA channel          */

        unsigned long           state;

        struct list_head        dev_list;
#ifdef CONFIG_NETPOLL
        struct list_head        napi_list;
#endif

        /* Net device features */
        unsigned long           features;
#define NETIF_F_SG              1       /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM         2       /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM         4       /* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM         8       /* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM       16      /* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA         32      /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST        64      /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX      128     /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX      256     /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
#define NETIF_F_GSO             2048    /* Enable software GSO. */
#define NETIF_F_LLTX            4096    /* LockLess TX - deprecated. Please */
                                        /* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL     8192    /* Does not change network namespaces */
#define NETIF_F_LRO             32768   /* large receive offload */

        /* Segmentation offload features */
#define NETIF_F_GSO_SHIFT       16
#define NETIF_F_GSO_MASK        0xffff0000
#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

        /* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM        (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

        /*
         * If one device supports one of these features, then enable them
         * for all in netdev_increment_features.
         */
#define NETIF_F_ONE_FOR_ALL     (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
                                 NETIF_F_SG | NETIF_F_HIGHDMA | \
                                 NETIF_F_FRAGLIST)

        /* Interface index. Unique device identifier */
        int                     ifindex;
        int                     iflink;

        struct net_device_stats stats;

#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data   *wireless_data;
#endif
        /* Management operations */
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;

        /* Hardware header description */
        const struct header_ops *header_ops;

        unsigned int            flags;          /* interface flags (a la BSD) */
        unsigned short          gflags;
        unsigned short          priv_flags;     /* Like 'flags' but invisible to userspace. */
        unsigned short          padded;         /* How much padding added by alloc_netdev() */

        unsigned char           operstate;      /* RFC2863 operstate */
        unsigned char           link_mode;      /* mapping policy to operstate */

        unsigned                mtu;            /* interface MTU value */
        unsigned short          type;           /* interface hardware type */
        unsigned short          hard_header_len;        /* hardware hdr length */

        /* extra head- and tailroom the hardware may need, but not in all cases
         * can this be guaranteed, especially tailroom. Some cases also use
         * LL_MAX_HEADER instead to allocate the skb.
         */
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;

        struct net_device       *master;        /* Pointer to master device of a group,
                                                 * which this device is member of.
                                                 */

        /* Interface address info. */
        unsigned char           perm_addr[MAX_ADDR_LEN];        /* permanent hw address */
        unsigned char           addr_len;       /* hardware address length */
        unsigned short          dev_id;         /* for shared network cards */

        spinlock_t              addr_list_lock;
        struct dev_addr_list    *uc_list;       /* Secondary unicast mac addresses */
        int                     uc_count;       /* Number of installed ucasts */
        int                     uc_promisc;
        struct dev_addr_list    *mc_list;       /* Multicast mac addresses */
        int                     mc_count;       /* Number of installed mcasts */
        unsigned int            promiscuity;
        unsigned int            allmulti;

        /* Protocol specific pointers */
#ifdef CONFIG_NET_DSA
        void                    *dsa_ptr;       /* dsa specific data */
#endif
        void                    *atalk_ptr;     /* AppleTalk link */
        void                    *ip_ptr;        /* IPv4 specific data */
        void                    *dn_ptr;        /* DECnet specific data */
        void                    *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
        struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
                                                   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
        unsigned long           last_rx;        /* Time of last Rx */
        /* Interface address info used in eth_type_trans() */
        unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
                                                           because most packets are unicast) */

        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast addr */

        struct netdev_queue     rx_queue;

        struct netdev_queue     *_tx ____cacheline_aligned_in_smp;

        /* Number of TX queues allocated at alloc_netdev_mq() time */
        unsigned int            num_tx_queues;

        /* Number of TX queues currently active in device */
        unsigned int            real_num_tx_queues;

        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
        spinlock_t              tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
        void                    *priv;          /* pointer to private data */

        /* These may be needed for future network-power-down code. */
        unsigned long           trans_start;    /* Time (in jiffies) of last Tx */

        int                     watchdog_timeo; /* used by dev_watchdog() */
        struct timer_list       watchdog_timer;

        /* Number of references to this device */
        atomic_t                refcnt ____cacheline_aligned_in_smp;

        /* delayed register/unregister */
        struct list_head        todo_list;
        /* device index hash chain */
        struct hlist_node       index_hlist;

        struct net_device       *link_watch_next;

        /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED=0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
               NETREG_UNREGISTERED,     /* completed unregister todo */
               NETREG_RELEASED,         /* called free_netdev */
        } reg_state;

        /* Called from unregister, can be used to call free_netdev */
        void (*destructor)(struct net_device *dev);

#ifdef CONFIG_NETPOLL
        struct netpoll_info     *npinfo;
#endif

#ifdef CONFIG_NET_NS
        /* Network namespace this network device is inside */
        struct net              *nd_net;
#endif

        /* mid-layer private */
        void                    *ml_priv;

        /* bridge stuff */
        struct net_bridge_port  *br_port;
        /* macvlan */
        struct macvlan_port     *macvlan_port;
        /* GARP */
        struct garp_port        *garp_port;

        /* class/net/name entry */
        struct device           dev;
        /* space for optional statistics and wireless sysfs groups */
        struct attribute_group  *sysfs_groups[3];

        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;

        /* VLAN feature mask */
        unsigned long           vlan_features;

        /* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE            65536
        unsigned int            gso_max_size;

#ifdef CONFIG_DCBNL
        /* Data Center Bridging netlink ops */
        struct dcbnl_rtnl_ops   *dcbnl_ops;
#endif

#ifdef CONFIG_COMPAT_NET_DEV_OPS
        struct {
                int     (*init)(struct net_device *dev);
                void    (*uninit)(struct net_device *dev);
                int     (*open)(struct net_device *dev);
                int     (*stop)(struct net_device *dev);
                int     (*hard_start_xmit) (struct sk_buff *skb,
                                            struct net_device *dev);
                u16     (*select_queue)(struct net_device *dev,
                                        struct sk_buff *skb);
                void    (*change_rx_flags)(struct net_device *dev,
                                           int flags);
                void    (*set_rx_mode)(struct net_device *dev);
                void    (*set_multicast_list)(struct net_device *dev);
                int     (*set_mac_address)(struct net_device *dev,
                                           void *addr);
                int     (*validate_addr)(struct net_device *dev);
                int     (*do_ioctl)(struct net_device *dev,
                                    struct ifreq *ifr, int cmd);
                int     (*set_config)(struct net_device *dev,
                                      struct ifmap *map);
                int     (*change_mtu)(struct net_device *dev, int new_mtu);
                int     (*neigh_setup)(struct net_device *dev,
                                       struct neigh_parms *);
                void    (*tx_timeout) (struct net_device *dev);
                struct net_device_stats* (*get_stats)(struct net_device *dev);
                void    (*vlan_rx_register)(struct net_device *dev,
                                            struct vlan_group *grp);
                void    (*vlan_rx_add_vid)(struct net_device *dev,
                                           unsigned short vid);
                void    (*vlan_rx_kill_vid)(struct net_device *dev,
                                            unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
                void    (*poll_controller)(struct net_device *dev);
#endif
        };
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN            32
#define NETDEV_ALIGN_CONST      (NETDEV_ALIGN - 1)

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
{
        return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
                                                      void *),
                                            void *arg)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
        return dev->nd_net;
#else
        return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
        release_net(dev->nd_net);
        dev->nd_net = hold_net(net);
#endif
}

static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
        if (dev->dsa_ptr != NULL)
                return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
        return 0;
}

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
        if (dev->dsa_ptr != NULL)
                return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
        return 0;
}

/**
 *      netdev_priv - access network device private data
 *      @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
        return (char *)dev + ((sizeof(struct net_device)
                               + NETDEV_ALIGN_CONST)
                              & ~NETDEV_ALIGN_CONST);
}
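
/*
 * Illustrative sketch (not part of the original header): private driver
 * state lives directly behind struct net_device, sized at alloc_netdev()
 * time and reached via netdev_priv().  struct my_priv, the "my%d" name
 * template and my_setup() are hypothetical.
 */
#if 0   /* example only */
struct my_priv {
        struct napi_struct      napi;
        spinlock_t              lock;
};

static struct net_device *my_create(void)
{
        struct net_device *dev;
        struct my_priv *priv;

        dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
        if (dev == NULL)
                return NULL;
        priv = netdev_priv(dev);        /* points just past struct net_device */
        spin_lock_init(&priv->lock);
        return dev;
}
#endif
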
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, it will cause a symlink to be created
 * during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))

/**
 *      netif_napi_add - initialize a napi context
 *      @dev:  network device
 *      @napi: napi context
 *      @poll: polling function
 *      @weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
                                  struct napi_struct *napi,
                                  int (*poll)(struct napi_struct *, int),
                                  int weight)
{
        INIT_LIST_HEAD(&napi->poll_list);
        napi->poll = poll;
        napi->weight = weight;
#ifdef CONFIG_NETPOLL
        napi->dev = dev;
        list_add(&napi->dev_list, &dev->napi_list);
        spin_lock_init(&napi->poll_lock);
        napi->poll_owner = -1;
#endif
        set_bit(NAPI_STATE_SCHED, &napi->state);
}
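
/*
 * Illustrative sketch (not part of the original header): registering the
 * poll routine from the earlier example during device setup, here assumed
 * to run in a hypothetical PCI probe routine (pdev, priv).  A weight of 64
 * is the conventional default for Ethernet drivers.
 */
#if 0   /* example only */
        SET_NETDEV_DEV(dev, &pdev->dev);        /* sysfs: link netdev to its bus device */
        netif_napi_add(dev, &priv->napi, my_poll, 64);
#endif
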
/**
 *      netif_napi_del - remove a napi context
 *      @napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
        list_del(&napi->dev_list);
#endif
}

struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here */
        int                     (*func) (struct sk_buff *,
                                         struct net_device *,
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                int features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        void                    *af_packet_priv;
        struct list_head        list;
};
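
/*
 * Illustrative sketch (not part of the original header): a protocol module
 * registers a receive hook for one ethertype with dev_add_pack() (declared
 * further below).  my_rcv() and the choice of ETH_P_IP are hypothetical.
 */
#if 0   /* example only */
static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *pt, struct net_device *orig_dev);

static struct packet_type my_packet_type = {
        .type = __constant_htons(ETH_P_IP),
        .func = my_rcv,
};
/* module init: dev_add_pack(&my_packet_type);
 * module exit: dev_remove_pack(&my_packet_type);
 */
#endif
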
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;          /* Device list lock */

#define for_each_netdev(net, d)         \
                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
                list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)        \
                list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
        struct list_head *lh;
        struct net *net;

        net = dev_net(dev);
        lh = dev->dev_list.next;
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
        return list_empty(&net->dev_base_head) ? NULL :
                net_device_entry(net->dev_base_head.next);
}
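
/*
 * Illustrative sketch (not part of the original header): walking the device
 * list of a namespace under dev_base_lock.
 */
#if 0   /* example only */
static void my_list_devices(struct net *net)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        for_each_netdev(net, dev)
                printk(KERN_INFO "%s: ifindex %d\n", dev->name, dev->ifindex);
        read_unlock(&dev_base_lock);
}
#endif
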
extern int              netdev_boot_setup_check(struct net_device *dev);
extern unsigned long    netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void             dev_add_pack(struct packet_type *pt);
extern void             dev_remove_pack(struct packet_type *pt);
extern void             __dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
                                           unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int              dev_alloc_name(struct net_device *dev, const char *name);
extern int              dev_open(struct net_device *dev);
extern int              dev_close(struct net_device *dev);
extern void             dev_disable_lro(struct net_device *dev);
extern int              dev_queue_xmit(struct sk_buff *skb);
extern int              register_netdevice(struct net_device *dev);
extern void             unregister_netdevice(struct net_device *dev);
extern void             free_netdev(struct net_device *dev);
extern void             synchronize_net(void);
extern int              register_netdevice_notifier(struct notifier_block *nb);
extern int              unregister_netdevice_notifier(struct notifier_block *nb);
extern int              call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int              dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int              netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;
        return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int              register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
        struct Qdisc            *output_queue;
        struct sk_buff_head     input_pkt_queue;
        struct list_head        poll_list;
        struct sk_buff          *completion_queue;

        struct napi_struct      backlog;
#ifdef CONFIG_NET_DMA
        struct dma_chan         *net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
        if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *      netif_start_queue - allow transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_start_queue(txq);
        }
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
                clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
                return;
        }
#endif
        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
}

/**
 *      netif_wake_queue - restart transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
        netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_wake_queue(txq);
        }
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *      netif_stop_queue - stop transmitted packets
 *      @dev: network device
 *
 *      Stop upper layers calling the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                netif_tx_stop_queue(txq);
        }
}
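
/*
 * Illustrative sketch (not part of the original header): the classic TX
 * flow-control pattern -- stop the queue before the ring fills, and wake
 * it from the TX-completion path once descriptors are reclaimed.  The
 * my_* ring helpers are hypothetical.
 */
#if 0   /* example only */
static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);

        if (my_tx_ring_full(priv)) {
                /* Should rarely happen: the queue is stopped early below. */
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }
        my_queue_to_hw(priv, skb);
        if (my_tx_ring_nearly_full(priv))
                netif_stop_queue(dev);
        return NETDEV_TX_OK;
}
/* TX completion path: my_reclaim_descriptors(priv); netif_wake_queue(dev); */
#endif
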
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *      netif_queue_stopped - test if transmit queue is flowblocked
 *      @dev: network device
 *
 *      Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
        return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *      netif_running - test if up
 *      @dev: network device
 *
 *      Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if a subqueue is stopped.  All other device
 * management is done at the overall netdevice level.
 * There is also a check for whether the device is multiqueue.
 */

/**
 *      netif_start_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
        clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *      netif_stop_subqueue - stop sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        set_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *      netif_subqueue_stopped - test status of subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                           u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
        return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
                                         struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *      netif_wake_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
}

/**
 *      netif_is_multiqueue - test if device has multiple transmit queues
 *      @dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
        return (dev->num_tx_queues > 1);
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int              netif_rx(struct sk_buff *skb);
extern int              netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int              netif_receive_skb(struct sk_buff *skb);
extern void             netif_nit_deliver(struct sk_buff *skb);
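
/*
 * Illustrative sketch (not part of the original header): handing a received
 * Ethernet frame to the stack.  eth_type_trans() (from <linux/etherdevice.h>)
 * sets skb->protocol and skb->dev; netif_rx() queues from interrupt context,
 * while NAPI drivers call netif_receive_skb() from their poll routine
 * instead.  my_deliver() is a hypothetical helper.
 */
#if 0   /* example only */
static void my_deliver(struct net_device *dev, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, dev);
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
        netif_rx(skb);
}
#endif
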
extern int              dev_valid_name(const char *name);
extern int              dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int              dev_ethtool(struct net *net, struct ifreq *);
extern unsigned         dev_get_flags(const struct net_device *);
extern int              dev_change_flags(struct net_device *, unsigned);
extern int              dev_change_name(struct net_device *, const char *);
extern int              dev_set_alias(struct net_device *, const char *, size_t);
extern int              dev_change_net_namespace(struct net_device *,
                                                 struct net *, const char *);
extern int              dev_set_mtu(struct net_device *, int);
extern int              dev_set_mac_address(struct net_device *,
                                            struct sockaddr *);
extern int              dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev,
                                            struct netdev_queue *txq);

extern int              netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *      dev_put - release reference to device
 *      @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        atomic_dec(&dev->refcnt);
}

/**
 *      dev_hold - get reference to device
 *      @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *      netif_carrier_ok - test if carrier present
 *      @dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
  1265. /**
  1266. * netif_dormant_on - mark device as dormant.
  1267. * @dev: network device
  1268. *
  1269. * Mark device as dormant (as per RFC2863).
  1270. *
  1271. * The dormant state indicates that the relevant interface is not
  1272. * actually in a condition to pass packets (i.e., it is not 'up') but is
  1273. * in a "pending" state, waiting for some external event. For "on-
  1274. * demand" interfaces, this new state identifies the situation where the
  1275. * interface is waiting for events to place it in the up state.
  1276. *
  1277. */
  1278. static inline void netif_dormant_on(struct net_device *dev)
  1279. {
  1280. if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
  1281. linkwatch_fire_event(dev);
  1282. }
  1283. /**
  1284. * netif_dormant_off - set device as not dormant.
  1285. * @dev: network device
  1286. *
  1287. * Device is not in dormant state.
  1288. */
  1289. static inline void netif_dormant_off(struct net_device *dev)
  1290. {
  1291. if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
  1292. linkwatch_fire_event(dev);
  1293. }
  1294. /**
  1295. * netif_dormant - test if carrier present
  1296. * @dev: network device
  1297. *
  1298. * Check if carrier is present on device
  1299. */
  1300. static inline int netif_dormant(const struct net_device *dev)
  1301. {
  1302. return test_bit(__LINK_STATE_DORMANT, &dev->state);
  1303. }
  1304. /**
  1305. * netif_oper_up - test if device is operational
  1306. * @dev: network device
  1307. *
  1308. * Check if carrier is operational
  1309. */
  1310. static inline int netif_oper_up(const struct net_device *dev) {
  1311. return (dev->operstate == IF_OPER_UP ||
  1312. dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
  1313. }
  1314. /**
  1315. * netif_device_present - is device available or removed
  1316. * @dev: network device
  1317. *
  1318. * Check if device has not been removed from system.
  1319. */
  1320. static inline int netif_device_present(struct net_device *dev)
  1321. {
  1322. return test_bit(__LINK_STATE_PRESENT, &dev->state);
  1323. }
  1324. extern void netif_device_detach(struct net_device *dev);
  1325. extern void netif_device_attach(struct net_device *dev);
  1326. /*
  1327. * Network interface message level settings
  1328. */
  1329. #define HAVE_NETIF_MSG 1
  1330. enum {
  1331. NETIF_MSG_DRV = 0x0001,
  1332. NETIF_MSG_PROBE = 0x0002,
  1333. NETIF_MSG_LINK = 0x0004,
  1334. NETIF_MSG_TIMER = 0x0008,
  1335. NETIF_MSG_IFDOWN = 0x0010,
  1336. NETIF_MSG_IFUP = 0x0020,
  1337. NETIF_MSG_RX_ERR = 0x0040,
  1338. NETIF_MSG_TX_ERR = 0x0080,
  1339. NETIF_MSG_TX_QUEUED = 0x0100,
  1340. NETIF_MSG_INTR = 0x0200,
  1341. NETIF_MSG_TX_DONE = 0x0400,
  1342. NETIF_MSG_RX_STATUS = 0x0800,
  1343. NETIF_MSG_PKTDATA = 0x1000,
  1344. NETIF_MSG_HW = 0x2000,
  1345. NETIF_MSG_WOL = 0x4000,
  1346. };
  1347. #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
  1348. #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
  1349. #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
  1350. #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
  1351. #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
  1352. #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
  1353. #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
  1354. #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
  1355. #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
  1356. #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
  1357. #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
  1358. #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
  1359. #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
  1360. #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
  1361. #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
  1362. static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
  1363. {
  1364. /* use default */
  1365. if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
  1366. return default_msg_enable_bits;
  1367. if (debug_value == 0) /* no output */
  1368. return 0;
  1369. /* set low N bits */
  1370. return (1 << debug_value) - 1;
  1371. }
  1372. /* Test if receive needs to be scheduled but only if up */
  1373. static inline int netif_rx_schedule_prep(struct net_device *dev,
  1374. struct napi_struct *napi)
  1375. {
  1376. return napi_schedule_prep(napi);
  1377. }
  1378. /* Add interface to tail of rx poll list. This assumes that _prep has
  1379. * already been called and returned 1.
  1380. */
  1381. static inline void __netif_rx_schedule(struct net_device *dev,
  1382. struct napi_struct *napi)
  1383. {
  1384. __napi_schedule(napi);
  1385. }
  1386. /* Try to reschedule poll. Called by irq handler. */
  1387. static inline void netif_rx_schedule(struct net_device *dev,
  1388. struct napi_struct *napi)
  1389. {
  1390. if (netif_rx_schedule_prep(dev, napi))
  1391. __netif_rx_schedule(dev, napi);
  1392. }
  1393. /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
  1394. static inline int netif_rx_reschedule(struct net_device *dev,
  1395. struct napi_struct *napi)
  1396. {
  1397. if (napi_schedule_prep(napi)) {
  1398. __netif_rx_schedule(dev, napi);
  1399. return 1;
  1400. }
  1401. return 0;
  1402. }
  1403. /* same as netif_rx_complete, except that local_irq_save(flags)
  1404. * has already been issued
  1405. */
  1406. static inline void __netif_rx_complete(struct net_device *dev,
  1407. struct napi_struct *napi)
  1408. {
  1409. __napi_complete(napi);
  1410. }
  1411. /* Remove interface from poll list: it must be in the poll list
  1412. * on current cpu. This primitive is called by dev->poll(), when
  1413. * it completes the work. The device cannot be out of poll list at this
  1414. * moment, it is BUG().
  1415. */
  1416. static inline void netif_rx_complete(struct net_device *dev,
  1417. struct napi_struct *napi)
  1418. {
  1419. unsigned long flags;
  1420. local_irq_save(flags);
  1421. __netif_rx_complete(dev, napi);
  1422. local_irq_restore(flags);
  1423. }
  1424. static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
  1425. {
  1426. spin_lock(&txq->_xmit_lock);
  1427. txq->xmit_lock_owner = cpu;
  1428. }
  1429. static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  1430. {
  1431. spin_lock_bh(&txq->_xmit_lock);
  1432. txq->xmit_lock_owner = smp_processor_id();
  1433. }
  1434. static inline int __netif_tx_trylock(struct netdev_queue *txq)
  1435. {
  1436. int ok = spin_trylock(&txq->_xmit_lock);
  1437. if (likely(ok))
  1438. txq->xmit_lock_owner = smp_processor_id();
  1439. return ok;
  1440. }
  1441. static inline void __netif_tx_unlock(struct netdev_queue *txq)
  1442. {
  1443. txq->xmit_lock_owner = -1;
  1444. spin_unlock(&txq->_xmit_lock);
  1445. }
  1446. static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
  1447. {
  1448. txq->xmit_lock_owner = -1;
  1449. spin_unlock_bh(&txq->_xmit_lock);
  1450. }
  1451. /**
  1452. * netif_tx_lock - grab network device transmit lock
  1453. * @dev: network device
  1454. *
  1455. * Get network device transmit lock
  1456. */
  1457. static inline void netif_tx_lock(struct net_device *dev)
  1458. {
  1459. unsigned int i;
  1460. int cpu;
  1461. spin_lock(&dev->tx_global_lock);
  1462. cpu = smp_processor_id();
  1463. for (i = 0; i < dev->num_tx_queues; i++) {
  1464. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1465. /* We are the only thread of execution doing a
  1466. * freeze, but we have to grab the _xmit_lock in
  1467. * order to synchronize with threads which are in
  1468. * the ->hard_start_xmit() handler and already
  1469. * checked the frozen bit.
  1470. */
  1471. __netif_tx_lock(txq, cpu);
  1472. set_bit(__QUEUE_STATE_FROZEN, &txq->state);
  1473. __netif_tx_unlock(txq);
  1474. }
  1475. }
  1476. static inline void netif_tx_lock_bh(struct net_device *dev)
  1477. {
  1478. local_bh_disable();
  1479. netif_tx_lock(dev);
  1480. }
  1481. static inline void netif_tx_unlock(struct net_device *dev)
  1482. {
  1483. unsigned int i;
  1484. for (i = 0; i < dev->num_tx_queues; i++) {
  1485. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1486. /* No need to grab the _xmit_lock here. If the
  1487. * queue is not stopped for another reason, we
  1488. * force a schedule.
  1489. */
  1490. clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
  1491. if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
  1492. __netif_schedule(txq->qdisc);
  1493. }
  1494. spin_unlock(&dev->tx_global_lock);
  1495. }
  1496. static inline void netif_tx_unlock_bh(struct net_device *dev)
  1497. {
  1498. netif_tx_unlock(dev);
  1499. local_bh_enable();
  1500. }
  1501. #define HARD_TX_LOCK(dev, txq, cpu) { \
  1502. if ((dev->features & NETIF_F_LLTX) == 0) { \
  1503. __netif_tx_lock(txq, cpu); \
  1504. } \
  1505. }
  1506. #define HARD_TX_UNLOCK(dev, txq) { \
  1507. if ((dev->features & NETIF_F_LLTX) == 0) { \
  1508. __netif_tx_unlock(txq); \
  1509. } \
  1510. }
  1511. static inline void netif_tx_disable(struct net_device *dev)
  1512. {
  1513. unsigned int i;
  1514. int cpu;
  1515. local_bh_disable();
  1516. cpu = smp_processor_id();
  1517. for (i = 0; i < dev->num_tx_queues; i++) {
  1518. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  1519. __netif_tx_lock(txq, cpu);
  1520. netif_tx_stop_queue(txq);
  1521. __netif_tx_unlock(txq);
  1522. }
  1523. local_bh_enable();
  1524. }
  1525. static inline void netif_addr_lock(struct net_device *dev)
  1526. {
  1527. spin_lock(&dev->addr_list_lock);
  1528. }
  1529. static inline void netif_addr_lock_bh(struct net_device *dev)
  1530. {
  1531. spin_lock_bh(&dev->addr_list_lock);
  1532. }
  1533. static inline void netif_addr_unlock(struct net_device *dev)
  1534. {
  1535. spin_unlock(&dev->addr_list_lock);
  1536. }
  1537. static inline void netif_addr_unlock_bh(struct net_device *dev)
  1538. {
  1539. spin_unlock_bh(&dev->addr_list_lock);
  1540. }
  1541. /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  1542. extern void ether_setup(struct net_device *dev);
  1543. /* Support for loadable net-drivers */
  1544. extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
  1545. void (*setup)(struct net_device *),
  1546. unsigned int queue_count);
  1547. #define alloc_netdev(sizeof_priv, name, setup) \
  1548. alloc_netdev_mq(sizeof_priv, name, setup, 1)
  1549. extern int register_netdev(struct net_device *dev);
  1550. extern void unregister_netdev(struct net_device *dev);
  1551. /* Functions used for secondary unicast and multicast support */
  1552. extern void dev_set_rx_mode(struct net_device *dev);
  1553. extern void __dev_set_rx_mode(struct net_device *dev);
  1554. extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
  1555. extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
  1556. extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
  1557. extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
  1558. extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
  1559. extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
  1560. extern int dev_mc_sync(struct net_device *to, struct net_device *from);
  1561. extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
  1562. extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
  1563. extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
  1564. extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
  1565. extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
  1566. extern int dev_set_promiscuity(struct net_device *dev, int inc);
  1567. extern int dev_set_allmulti(struct net_device *dev, int inc);
  1568. extern void netdev_state_change(struct net_device *dev);
  1569. extern void netdev_bonding_change(struct net_device *dev);
  1570. extern void netdev_features_change(struct net_device *dev);
  1571. /* Load a device via the kmod */
  1572. extern void dev_load(struct net *net, const char *name);
  1573. extern void dev_mcast_init(void);
  1574. extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
  1575. extern int netdev_max_backlog;
  1576. extern int weight_p;
  1577. extern int netdev_set_master(struct net_device *dev, struct net_device *master);
  1578. extern int skb_checksum_help(struct sk_buff *skb);
  1579. extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
  1580. #ifdef CONFIG_BUG
  1581. extern void netdev_rx_csum_fault(struct net_device *dev);
  1582. #else
  1583. static inline void netdev_rx_csum_fault(struct net_device *dev)
  1584. {
  1585. }
  1586. #endif
  1587. /* rx skb timestamps */
  1588. extern void net_enable_timestamp(void);
  1589. extern void net_disable_timestamp(void);
  1590. #ifdef CONFIG_PROC_FS
  1591. extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
  1592. extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
  1593. extern void dev_seq_stop(struct seq_file *seq, void *v);
  1594. #endif
  1595. extern int netdev_class_create_file(struct class_attribute *class_attr);
  1596. extern void netdev_class_remove_file(struct class_attribute *class_attr);
  1597. extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
  1598. extern void linkwatch_run_queue(void);
  1599. unsigned long netdev_increment_features(unsigned long all, unsigned long one,
  1600. unsigned long mask);
  1601. unsigned long netdev_fix_features(unsigned long features, const char *name);
  1602. static inline int net_gso_ok(int features, int gso_type)
  1603. {
  1604. int feature = gso_type << NETIF_F_GSO_SHIFT;
  1605. return (features & feature) == feature;
  1606. }
  1607. static inline int skb_gso_ok(struct sk_buff *skb, int features)
  1608. {
  1609. return net_gso_ok(features, skb_shinfo(skb)->gso_type);
  1610. }
  1611. static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  1612. {
  1613. return skb_is_gso(skb) &&
  1614. (!skb_gso_ok(skb, dev->features) ||
  1615. unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
  1616. }
  1617. static inline void netif_set_gso_max_size(struct net_device *dev,
  1618. unsigned int size)
  1619. {
  1620. dev->gso_max_size = size;
  1621. }
  1622. /* On bonding slaves other than the currently active slave, suppress
  1623. * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
  1624. * ARP on active-backup slaves with arp_validate enabled.
  1625. */
  1626. static inline int skb_bond_should_drop(struct sk_buff *skb)
  1627. {
  1628. struct net_device *dev = skb->dev;
  1629. struct net_device *master = dev->master;
  1630. if (master) {
  1631. if (master->priv_flags & IFF_MASTER_ARPMON)
  1632. dev->last_rx = jiffies;
  1633. if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
  1634. if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
  1635. skb->protocol == __constant_htons(ETH_P_ARP))
  1636. return 0;
  1637. if (master->priv_flags & IFF_MASTER_ALB) {
  1638. if (skb->pkt_type != PACKET_BROADCAST &&
  1639. skb->pkt_type != PACKET_MULTICAST)
  1640. return 0;
  1641. }
  1642. if (master->priv_flags & IFF_MASTER_8023AD &&
  1643. skb->protocol == __constant_htons(ETH_P_SLOW))
  1644. return 0;
  1645. return 1;
  1646. }
  1647. }
  1648. return 0;
  1649. }
  1650. extern struct pernet_operations __net_initdata loopback_net_ops;
  1651. #endif /* __KERNEL__ */
  1652. #endif /* _LINUX_DEV_H */