netdevice.h

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;

/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV	/* feature macro: alloc_xxxdev
				   functions are available. */
#define HAVE_FREE_NETDEV	/* free_netdev() */
#define HAVE_NETDEV_PRIV	/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped */
#define NET_XMIT_CN		2	/* congestion notification */
#define NET_XMIT_POLICED	3	/* skb is shot by police */
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
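/*
 * Example (illustrative sketch, not part of this header): a caller of
 * dev_queue_xmit() that only cares about "was the packet accepted" can map
 * the return code with net_xmit_eval(), so NET_XMIT_CN is not treated as a
 * hard failure:
 *
 *	int rc = dev_queue_xmit(skb);
 *
 *	rc = net_xmit_eval(rc);		// NET_XMIT_CN becomes 0
 *	if (rc)
 *		stats->tx_dropped++;	// hypothetical bookkeeping
 */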
#endif

#define MAX_ADDR_LEN	32	/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */
#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */
struct net_device_stats
{
	unsigned long	rx_packets;	/* total packets received */
	unsigned long	tx_packets;	/* total packets transmitted */
	unsigned long	rx_bytes;	/* total bytes received */
	unsigned long	tx_bytes;	/* total bytes transmitted */
	unsigned long	rx_errors;	/* bad packets received */
	unsigned long	tx_errors;	/* packet transmit problems */
	unsigned long	rx_dropped;	/* no space in linux buffers */
	unsigned long	tx_dropped;	/* no space available in linux */
	unsigned long	multicast;	/* multicast packets received */
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow */
	unsigned long	rx_crc_errors;		/* recved pkt with crc error */
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun */
	unsigned long	rx_missed_errors;	/* receiver missed packet */

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
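/*
 * Example (illustrative sketch, not part of this header): a driver's receive
 * path typically updates these counters and reports them through its
 * get_stats hook.  "foo_rx", "foo_get_stats" and struct foo_priv are
 * hypothetical names.
 *
 *	static void foo_rx(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		dev->stats.rx_packets++;
 *		dev->stats.rx_bytes += skb->len;
 *		netif_receive_skb(skb);
 *	}
 *
 *	static struct net_device_stats *foo_get_stats(struct net_device *dev)
 *	{
 *		return &dev->stats;
 *	}
 */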
/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */
#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry */
	atomic_t	hh_refcnt;	/* number of users */
	/*
	 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
	 * cache line on SMP.
	 * They are mostly read, but hh_refcnt may be changed quite frequently,
	 * incurring cache line ping pongs.
	 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
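/*
 * Example (illustrative sketch, not part of this header): a protocol that
 * builds its own output packets usually reserves link-layer headroom with
 * LL_RESERVED_SPACE() before pushing its payload.  "foo_build_skb" and the
 * length parameter are hypothetical.
 *
 *	static struct sk_buff *foo_build_skb(struct net_device *dev, int len)
 *	{
 *		struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
 *						GFP_ATOMIC);
 *		if (!skb)
 *			return NULL;
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		skb->dev = dev;
 *		return skb;
 *	}
 */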
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t
{
	__LINK_STATE_XOFF=0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};

/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
};

extern void FASTCALL(__napi_schedule(struct napi_struct *n));

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	local_irq_disable();
	__napi_complete(n);
	local_irq_enable();
}

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep_interruptible(1);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
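/*
 * Example (illustrative sketch, not part of this header): the usual NAPI
 * pattern is an interrupt handler that disables further RX interrupts and
 * schedules the poll routine, plus a poll callback that processes at most
 * "budget" packets and completes when the ring is drained.  The foo_* names
 * and register accesses are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		foo_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int done = foo_clean_rx_ring(priv, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return done;
 *	}
 */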
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
struct net_device
{
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end */
	unsigned long		mem_start;	/* shared mem start */
	unsigned long		base_addr;	/* device I/O address */
	unsigned int		irq;		/* device IRQ number */

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned char		if_port;	/* Selectable AUI, TP,.. */
	unsigned char		dma;		/* DMA channel */

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	struct net_device	*next_sched;

	/* Interface index. Unique device identifier */
	int			ifindex;
	int			iflink;

	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	unsigned int		flags;		/* interface flags (a la BSD) */
	unsigned short		gflags;
	unsigned short		priv_flags;	/* Like 'flags' but invisible to userspace. */
	unsigned short		padded;		/* How much padding added by alloc_netdev() */

	unsigned char		operstate;	/* RFC2863 operstate */
	unsigned char		link_mode;	/* mapping policy to operstate */

	unsigned		mtu;		/* interface MTU value */
	unsigned short		type;		/* interface hardware type */
	unsigned short		hard_header_len;	/* hardware hdr length */

	struct net_device	*master;	/* Pointer to master device of a group,
						 * which this device is member of.
						 */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN];	/* permanent hw address */
	unsigned char		addr_len;	/* hardware address length */
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts */
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses */
	int			mc_count;	/* Number of installed mcasts */
	int			promiscuity;
	int			allmulti;

	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link */
	void			*ip_ptr;	/* IPv4 specific data */
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data */
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

	/*
	 * Cache line mostly used on receive path (including eth_type_trans())
	 */
	unsigned long		last_rx;	/* Time of last Rx */
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast address */

	/*
	 * Cache line mostly used on queue transmit path (qdisc)
	 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

	/*
	 * One part is mostly used on xmit path (device)
	 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of processor entered to hard_start_xmit or -1,
	   if nobody entered there.
	 */
	int			xmit_lock_owner;
	void			*priv;		/* pointer to private data */
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx */

	int			watchdog_timeo;	/* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

	/*
	 * refcnt is a very hot point, so align it on SMP
	 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
	int			(*hard_header) (struct sk_buff *skb,
						struct net_device *dev,
						unsigned short type,
						void *daddr,
						void *saddr,
						unsigned len);
	int			(*rebuild_header)(struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_HEADER_CACHE
	int			(*hard_header_cache)(struct neighbour *neigh,
						     struct hh_cache *hh);
	void			(*header_cache_update)(struct hh_cache *hh,
						       struct net_device *dev,
						       unsigned char *haddr);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*hard_header_parse)(struct sk_buff *skb,
						     unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	/* Network namespace this network device is inside */
	struct net		*nd_net;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* The TX queue control structures */
	unsigned int			egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
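/*
 * Example (illustrative sketch, not part of this header): a driver normally
 * embeds a struct napi_struct in its private data, registers it with
 * netif_napi_add() at probe time, and enables it from its open() routine.
 * "foo_probe", "foo_open", "FOO_NAPI_WEIGHT" and struct foo_priv are
 * hypothetical.
 *
 *	struct foo_priv {
 *		struct napi_struct napi;
 *		// ... device specific state ...
 *	};
 *
 *	// in foo_probe():
 *	netif_napi_add(dev, &priv->napi, foo_poll, FOO_NAPI_WEIGHT);
 *
 *	// in foo_open():
 *	napi_enable(&priv->napi);
 *	netif_start_queue(dev);
 */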
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
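/*
 * Example (illustrative sketch, not part of this header): a protocol module
 * registers a receive handler for its ethertype with dev_add_pack() and
 * removes it with dev_remove_pack() on unload.  "foo_rcv" and ETH_P_FOO are
 * hypothetical.
 *
 *	static struct packet_type foo_packet_type = {
 *		.type	= __constant_htons(ETH_P_FOO),
 *		.func	= foo_rcv,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		dev_add_pack(&foo_packet_type);
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		dev_remove_pack(&foo_packet_type);
 *	}
 */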
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev->nd_net;
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
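/*
 * Example (illustrative sketch, not part of this header): walking the device
 * list of a namespace.  Readers hold dev_base_lock (or the RTNL) while
 * iterating; &init_net is used here as the namespace.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(&init_net, dev)
 *		printk(KERN_DEBUG "found %s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */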
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_stop_queue - stop transmitting packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flow blocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}
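/*
 * Example (illustrative sketch, not part of this header): a driver's
 * hard_start_xmit routine normally stops its queue when the hardware ring
 * fills and lets the TX-completion path wake it again with
 * netif_wake_queue().  The foo_* helpers are hypothetical.
 *
 *	static int foo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (foo_tx_ring_full(priv)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;
 *		}
 *		foo_queue_frame(priv, skb);
 *		dev->trans_start = jiffies;
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// in the TX-completion interrupt path:
 *	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */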
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always false if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */
extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
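/*
 * Example (illustrative sketch, not part of this header): a driver's link
 * state handler, e.g. run from a PHY status interrupt, reports link changes
 * to the stack with netif_carrier_on()/netif_carrier_off().  The
 * foo_link_is_up() helper is hypothetical.
 *
 *	static void foo_check_link(struct net_device *dev)
 *	{
 *		if (foo_link_is_up(dev)) {
 *			if (!netif_carrier_ok(dev))
 *				netif_carrier_on(dev);
 *		} else {
 *			if (netif_carrier_ok(dev))
 *				netif_carrier_off(dev);
 *		}
 *	}
 */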
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
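/*
 * Example (illustrative sketch, not part of this header): drivers usually
 * keep an msg_enable field in their private data, seed it from a module
 * parameter with netif_msg_init(), and gate their printk()s on the
 * netif_msg_*() helpers.  "debug", the foo_* context and the default mask
 * are hypothetical.
 *
 *	static int debug = -1;	// -1 means "use the driver's default bits"
 *	module_param(debug, int, 0);
 *
 *	// in foo_probe():
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *
 *	// later, in the link handler:
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */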
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return netif_running(dev) && napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	dev_hold(dev);
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
	dev_put(dev);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(dev, smp_processor_id());
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(dev, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}
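/*
 * Example (illustrative sketch, not part of this header): when reconfiguring
 * hardware from process context, a driver can take the transmit lock so it
 * does not race with its own hard_start_xmit routine.  foo_reset_rings() is
 * hypothetical.
 *
 *	netif_tx_lock_bh(dev);
 *	netif_stop_queue(dev);
 *	foo_reset_rings(netdev_priv(dev));
 *	netif_start_queue(dev);
 *	netif_tx_unlock_bh(dev);
 */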
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
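/*
 * Example (illustrative sketch, not part of this header): the usual lifetime
 * of a loadable driver's net_device.  The private area is carved out by
 * alloc_netdev() and reached through netdev_priv(); struct foo_priv, the
 * "foo%d" name template and the foo_* hooks are hypothetical.
 *
 *	struct net_device *dev;
 *	struct foo_priv *priv;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *
 *	dev->open = foo_open;
 *	dev->stop = foo_stop;
 *	dev->hard_start_xmit = foo_hard_start_xmit;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 *	// on removal:
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */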
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
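/*
 * Example (illustrative sketch, not part of this header): the core transmit
 * path uses netif_needs_gso() to decide whether a GSO skb must be segmented
 * in software before it is handed to the driver; roughly:
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		// transmit each segment in turn instead of the original skb
 *	}
 */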
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
#endif /* __KERNEL__ */

#endif	/* _LINUX_DEV_H */