netdevice.h
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;

/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV	/* feature macro: alloc_xxxdev functions are available. */
#define HAVE_FREE_NETDEV	/* free_netdev() */
#define HAVE_NETDEV_PRIV	/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped */
#define NET_XMIT_CN		2	/* congestion notification */
#define NET_XMIT_POLICED	3	/* skb is shot by police */
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

#endif

#define MAX_ADDR_LEN	32	/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */

/*
 * Compute the worst case header length according to the protocols
 * used.
 */
#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};

/*
 * Network device statistics. Akin to the 2.0 ether stats but
 * with byte counters.
 */
struct net_device_stats
{
	unsigned long	rx_packets;	/* total packets received */
	unsigned long	tx_packets;	/* total packets transmitted */
	unsigned long	rx_bytes;	/* total bytes received */
	unsigned long	tx_bytes;	/* total bytes transmitted */
	unsigned long	rx_errors;	/* bad packets received */
	unsigned long	tx_errors;	/* packet transmit problems */
	unsigned long	rx_dropped;	/* no space in linux buffers */
	unsigned long	tx_dropped;	/* no space available in linux */
	unsigned long	multicast;	/* multicast packets received */
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow */
	unsigned long	rx_crc_errors;		/* recved pkt with crc error */
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun */
	unsigned long	rx_missed_errors;	/* receiver missed packet */

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 * We tag multicasts with these structures.
 */
#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers
struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry */
	atomic_t	hh_refcnt;	/* number of users */
	/*
	 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
	 * cache line on SMP.
	 * They are mostly read, but hh_refcnt may be changed quite frequently,
	 * incurring cache line ping pongs.
	 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
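
/* Illustrative sketch (not part of this header): a protocol that builds its
 * own output skb typically reserves link-layer headroom with
 * LL_RESERVED_SPACE() before pushing its payload; "len" and "dev" below are
 * assumed to be supplied by the caller:
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */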
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t
{
	__LINK_STATE_XOFF = 0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_RX_SCHED,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
	/* Set by the netpoll NAPI code */
	__LINK_STATE_POLL_LIST_FROZEN,
};

/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * The DEVICE structure.
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */
struct net_device
{
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 * I/O specific fields
	 * FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end */
	unsigned long		mem_start;	/* shared mem start */
	unsigned long		base_addr;	/* device I/O address */
	unsigned int		irq;		/* device IRQ number */

	/*
	 * Some hardware also needs these fields, but they are not
	 * part of the usual set specified in Space.c.
	 */
	unsigned char		if_port;	/* Selectable AUI, TP,.. */
	unsigned char		dma;		/* DMA channel */

	unsigned long		state;

	struct list_head	dev_list;

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
	struct net_device	*next_sched;

	/* Interface index. Unique device identifier */
	int			ifindex;
	int			iflink;

	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	unsigned int		flags;		/* interface flags (a la BSD) */
	unsigned short		gflags;
	unsigned short		priv_flags;	/* Like 'flags' but invisible to userspace. */
	unsigned short		padded;		/* How much padding added by alloc_netdev() */

	unsigned char		operstate;	/* RFC2863 operstate */
	unsigned char		link_mode;	/* mapping policy to operstate */

	unsigned		mtu;		/* interface MTU value */
	unsigned short		type;		/* interface hardware type */
	unsigned short		hard_header_len;	/* hardware hdr length */

	struct net_device	*master;	/* Pointer to master device of a group,
						 * which this device is member of.
						 */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];	/* permanent hw address */
	unsigned char		addr_len;	/* hardware address length */
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts */
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
	int			mc_count;	/* Number of installed mcasts */
	int			promiscuity;
	int			allmulti;

	/* Protocol specific pointers */
	void			*atalk_ptr;	/* AppleTalk link */
	void			*ip_ptr;	/* IPv4 specific data */
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

	/*
	 * Cache line mostly used on receive path (including eth_type_trans())
	 */
	struct list_head	poll_list ____cacheline_aligned_in_smp;
						/* Link to poll list */
	int			(*poll)(struct net_device *dev, int *quota);
	int			quota;
	int			weight;
	unsigned long		last_rx;	/* Time of last Rx */

	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */
	/*
	 * Cache line mostly used on queue transmit path (qdisc)
	 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

	/*
	 * One part is mostly used on xmit path (device)
	 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of processor entered to hard_start_xmit or -1,
	 * if nobody entered there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data */
	int			(*hard_start_xmit)(struct sk_buff *skb,
						   struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx */

	int			watchdog_timeo;	/* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/*
	 * refcnt is a very hot point, so align it on SMP
	 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED = 0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
	int			(*hard_header)(struct sk_buff *skb,
					       struct net_device *dev,
					       unsigned short type,
					       void *daddr,
					       void *saddr,
					       unsigned len);
	int			(*rebuild_header)(struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_HEADER_CACHE
	int			(*hard_header_cache)(struct neighbour *neigh,
						     struct hh_cache *hh);
	void			(*header_cache_update)(struct hh_cache *hh,
						       struct net_device *dev,
						       unsigned char *haddr);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout)(struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*hard_header_parse)(struct sk_buff *skb,
						     unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* The TX queue control structures */
	unsigned int		egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[0];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

#define SET_MODULE_OWNER(dev) do { } while (0)
/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
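
/* Illustrative sketch (not part of this header): a typical driver probe path
 * allocates the device together with its private area, sets the sysfs parent
 * and registers the interface.  "struct my_priv", "pdev" and "err" are
 * hypothetical names used only for this example:
 *
 *	struct net_device *dev;
 *	struct my_priv *priv;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	SET_NETDEV_DEV(dev, &pdev->dev);
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */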
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here */
	int			(*func)(struct sk_buff *,
					struct net_device *,
					struct packet_type *,
					struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
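
/* Illustrative sketch (not part of this header): a protocol registers a
 * receive handler for its ethertype with dev_add_pack() and removes it with
 * dev_remove_pack().  "my_rcv" and ETH_P_MYPROTO are hypothetical:
 *
 *	static struct packet_type my_packet_type = {
 *		.type = __constant_htons(ETH_P_MYPROTO),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);
 *	...
 *	dev_remove_pack(&my_packet_type);
 */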
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern struct net_device	loopback_dev;	/* The loopback */
extern struct list_head		dev_base_head;	/* All devices */
extern rwlock_t			dev_base_lock;	/* Device list lock */

#define for_each_netdev(d)		\
		list_for_each_entry(d, &dev_base_head, dev_list)
#define for_each_netdev_safe(d, n)	\
		list_for_each_entry_safe(d, n, &dev_base_head, dev_list)
#define for_each_netdev_continue(d)	\
		list_for_each_entry_continue(d, &dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;

	lh = dev->dev_list.next;
	return lh == &dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(void)
{
	return list_empty(&dev_base_head) ? NULL :
		net_device_entry(dev_base_head.next);
}
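
/* Illustrative sketch (not part of this header): walking the global device
 * list is normally done under dev_base_lock (or with the RTNL held):
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(dev)
 *		printk(KERN_DEBUG "found %s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */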
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(const char *name);
extern struct net_device *__dev_get_by_name(const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, void *v);
extern struct net_device *dev_get_by_index(int ifindex);
extern struct net_device *__dev_get_by_index(int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct net_device	backlog_dev;	/* Sorry. 8) */
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
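
/* Illustrative sketch (not part of this header): a driver normally pairs
 * these helpers across its transmit and completion paths.  "my_hard_start_xmit"
 * and "tx_ring_full" are hypothetical driver-private names:
 *
 *	static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		...queue skb to the hardware...
 *		if (tx_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * and in the TX-completion interrupt, once descriptors have been reclaimed:
 *
 *	if (netif_queue_stopped(dev))
 *		netif_wake_queue(dev);
 */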
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}
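
/* Illustrative sketch (not part of this header): a multiqueue-capable driver
 * advertises NETIF_F_MULTI_QUEUE and then manages each TX ring independently;
 * "ring" and "my_ring_full" are hypothetical:
 *
 *	dev->features |= NETIF_F_MULTI_QUEUE;
 *	...
 *	if (my_ring_full(dev, ring))
 *		netif_stop_subqueue(dev, ring);
 *	...
 *	if (netif_subqueue_stopped(dev, ring))
 *		netif_wake_subqueue(dev, ring);
 */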
/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(unsigned int cmd, void __user *);
extern int		dev_ethtool(struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern void		dev_init(void);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */
extern void linkwatch_fire_event(struct net_device *dev);

static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
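
/* Illustrative sketch (not part of this header): a driver's link-change
 * interrupt typically reports the new state so the watchdog and link watch
 * machinery stay in sync; "link_is_up" is a hypothetical PHY status check:
 *
 *	if (link_is_up(dev))
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */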
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/* Hot-plugging. */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
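
/* Illustrative sketch (not part of this header): the netif_msg_* tests expect
 * the driver's private structure to carry a "msg_enable" field, usually
 * initialised from a module "debug" parameter.  "priv", "debug" and "dev" are
 * hypothetical driver-local names:
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */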
/* Test if receive needs to be scheduled */
static inline int __netif_rx_schedule_prep(struct net_device *dev)
{
	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev)
{
	return netif_running(dev) && __netif_rx_schedule_prep(dev);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
extern void __netif_rx_schedule(struct net_device *dev);

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev))
		__netif_rx_schedule(dev);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 * Do not inline this?
 */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
	if (netif_rx_schedule_prep(dev)) {
		unsigned long flags;

		dev->quota += undo;

		local_irq_save(flags);
		list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		local_irq_restore(flags);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev)
{
	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
	list_del(&dev->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev)
{
	unsigned long flags;

#ifdef CONFIG_NETPOLL
	/* Prevent race with netpoll - yes, this is a kludge.
	 * But at least it doesn't penalize the non-netpoll
	 * code path. */
	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
		return;
#endif
	local_irq_save(flags);
	__netif_rx_complete(dev);
	local_irq_restore(flags);
}

static inline void netif_poll_disable(struct net_device *dev)
{
	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
		/* No hurry. */
		schedule_timeout_interruptible(1);
}

static inline void netif_poll_enable(struct net_device *dev)
{
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
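
/* Illustrative sketch (not part of this header): the polled-receive ("NAPI")
 * flow built on these helpers.  The RX interrupt schedules polling and masks
 * further RX interrupts; dev->poll() then processes packets and re-arms the
 * interrupt when done.  "my_disable_rx_irq", "my_enable_rx_irq" and "work"
 * are hypothetical:
 *
 *	In the interrupt handler:
 *		if (netif_rx_schedule_prep(dev)) {
 *			my_disable_rx_irq(dev);
 *			__netif_rx_schedule(dev);
 *		}
 *
 *	In dev->poll(dev, budget), after handing up to min(*budget, dev->quota)
 *	packets to netif_receive_skb() and counting them in "work":
 *		*budget -= work;
 *		dev->quota -= work;
 *		if (all pending packets were processed) {
 *			netif_rx_complete(dev);
 *			my_enable_rx_irq(dev);
 *			return 0;
 *		}
 *		return 1;	(work left; stay on the poll list)
 */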
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)

extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
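
/* Illustrative sketch (not part of this header): teardown mirrors setup.  A
 * driver's remove path unregisters the interface first and only then releases
 * the net_device; free_netdev() must not be called while the device is still
 * registered:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */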
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void		dev_mc_discard(struct net_device *dev);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern void		__dev_addr_discard(struct dev_addr_list **list);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
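
/* Illustrative sketch (not part of this header): a driver's
 * set_multicast_list() handler typically walks dev->mc_list (using the dmi_*
 * compatibility names defined above) and programs the hardware filter;
 * "program_mc_filter" is a hypothetical helper:
 *
 *	struct dev_mc_list *dmi;
 *
 *	for (dmi = dev->mc_list; dmi; dmi = dmi->next)
 *		program_mc_filter(dev, dmi->dmi_addr, dmi->dmi_addrlen);
 */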
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
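
/* Illustrative sketch (not part of this header): before handing a packet to
 * the driver, the transmit path segments it in software when the device
 * cannot handle it as-is (this mirrors what dev_hard_start_xmit() does in
 * net/core/dev.c):
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *		...transmit each skb on the "segs" list individually...
 *	}
 */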
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */