netdevice.h

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;

/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV	/* feature macro: alloc_xxxdev
				   functions are available. */
#define HAVE_FREE_NETDEV	/* free_netdev() */
#define HAVE_NETDEV_PRIV	/* netdev_priv() */
#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

#endif
#define MAX_ADDR_LEN	32	/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */
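
/*
 * Illustrative sketch only (not part of this header): a driver's
 * hard_start_xmit routine typically maps its ring state onto these
 * codes.  "my_priv", "my_ring_full" and "my_queue_frame" are
 * hypothetical driver names.
 *
 *	static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (my_ring_full(priv))
 *			return NETDEV_TX_BUSY;	(core will retry the skb)
 *		my_queue_frame(priv, skb);
 *		return NETDEV_TX_OK;		(driver now owns the skb)
 *	}
 */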
#ifdef __KERNEL__

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /* __KERNEL__ */
struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};
/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;	/* total packets received	*/
	unsigned long	tx_packets;	/* total packets transmitted	*/
	unsigned long	rx_bytes;	/* total bytes received		*/
	unsigned long	tx_bytes;	/* total bytes transmitted	*/
	unsigned long	rx_errors;	/* bad packets received		*/
	unsigned long	tx_errors;	/* packet transmit problems	*/
	unsigned long	rx_dropped;	/* no space in linux buffers	*/
	unsigned long	tx_dropped;	/* no space available in linux	*/
	unsigned long	multicast;	/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
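
/*
 * Illustrative sketch: a driver that only maintains the embedded stats
 * block can point ->get_stats at a trivial accessor like the one below
 * ("my_get_stats" is a hypothetical name).
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		return &dev->stats;
 *	}
 */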
/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};
/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry		*/
	atomic_t	hh_refcnt;	/* number of users	*/
	/*
	 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
	 * cache line on SMP.
	 * They are mostly read, but hh_refcnt may be changed quite frequently,
	 * incurring cache line ping pongs.
	 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve hard_header_len rounded up to a multiple of HH_DATA_MOD bytes,
 * but at least that much.  An alternative is:
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
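
/*
 * Illustrative sketch: a protocol allocating an output skb reserves
 * link-layer headroom with LL_RESERVED_SPACE() before pushing its own
 * headers; "payload_len" is a hypothetical length.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb)
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */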
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};
extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}
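
/*
 * Illustrative sketch: an interrupt handler usually masks the device's
 * interrupts and defers the rest of the work to the poll routine via
 * the scheduling primitives above.  "my_priv" and "my_irq_disable" are
 * hypothetical driver names.
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_irq_disable(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */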
/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
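
/*
 * Illustrative sketch of the matching poll routine: process up to
 * "budget" packets, and only call napi_complete() (then re-enable
 * device interrupts) once the ring has been drained.  "my_priv",
 * "my_rx" and "my_irq_enable" are hypothetical driver names.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_rx(priv, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			my_irq_enable(priv);
 *		}
 *		return done;
 *	}
 */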
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI scheduling on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
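
/*
 * Illustrative sketch: napi_disable()/napi_enable() bracket operations
 * that must not race with the poll routine, e.g. a hypothetical ring
 * reset helper:
 *
 *	napi_disable(&priv->napi);
 *	my_reset_rings(priv);
 *	napi_enable(&priv->napi);
 */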
enum netdev_queue_state_t
{
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_QDISC_RUNNING,
};

struct netdev_queue {
	spinlock_t		lock;
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct sk_buff		*gso_skb;
	spinlock_t		_xmit_lock;
	int			xmit_lock_owner;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	struct netdev_queue	*next_sched;
};
/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */
struct net_device
{
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */
	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;

	/* Protocol specific pointers */
	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */

	struct netdev_queue	rx_queue;
	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

/*
 * One part is mostly used on xmit path (device)
 */
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;
/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

	/* The TX queue control structures */
	unsigned int		egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device:
 * if set prior to registration it will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
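
/*
 * Illustrative sketch: private state is carved out of the netdev
 * allocation (via alloc_netdev(), declared later in this header) and
 * reached through netdev_priv().  "struct my_priv" and "my_setup" are
 * hypothetical driver names.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
 *	if (dev) {
 *		struct my_priv *priv = netdev_priv(dev);
 *		...
 *	}
 */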
/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
	list_del(&napi->dev_list);
#endif
}
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	      */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
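
/*
 * Illustrative sketch: a protocol registers a receive hook for one
 * ethertype with dev_add_pack() (declared below).  "my_rcv" is a
 * hypothetical handler; ETH_P_IP is used only as an example type.
 *
 *	static struct packet_type my_pt = {
 *		.type	= __constant_htons(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *	dev_add_pack(&my_pt);
 */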
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t		dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct netdev_queue	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);
#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct netdev_queue *txq);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq);
}

static inline void netif_schedule(struct net_device *dev)
{
	netif_schedule_queue(&dev->tx_queue);
}
/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(&dev->tx_queue);
}
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue);
}

static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(&dev->tx_queue);
}
/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(&dev->tx_queue);
}
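
/*
 * Illustrative sketch of transmit flow control: the xmit path stops
 * the queue when its ring fills, and the completion path wakes it once
 * space is available again.  The "my_ring_free_slots" helper and
 * LOW_WATER threshold are hypothetical.
 *
 *	(in hard_start_xmit, after queueing a frame:)
 *	if (my_ring_free_slots(priv) == 0)
 *		netif_stop_queue(dev);
 *
 *	(in the tx completion handler:)
 *	if (netif_queue_stopped(dev) && my_ring_free_slots(priv) > LOW_WATER)
 *		netif_wake_queue(dev);
 */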
/**
 *	netif_queue_stopped - test if transmit queue is flow-blocked
 *	@dev: network device
 *
 * Test if transmit queue on device is currently unable to send.
 */
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(&dev->tx_queue);
}
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 * Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
}
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	return test_bit(__QUEUE_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(&dev->tx_queue);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always fails if NETDEVICE_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
}
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
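
/*
 * Illustrative sketch: code that stashes a net_device pointer beyond
 * the region in which it found the device takes a reference first and
 * drops it when done ("my_ctx" is a hypothetical consumer):
 *
 *	dev_hold(dev);
 *	my_ctx->dev = dev;
 *	...
 *	dev_put(my_ctx->dev);
 */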
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is in the dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
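
/*
 * Illustrative sketch: drivers conventionally feed a "debug" module
 * parameter into netif_msg_init() once at probe time and store the
 * result in their private msg_enable field; the names below follow
 * that convention but are hypothetical here.
 *
 *	static int debug = -1;		(module_param(debug, int, 0))
 *	...
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */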
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}
/**
 *	__netif_tx_lock - grab network device transmit lock
 *	@txq: network device transmit queue
 *	@cpu: cpu number of lock owner
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
}
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	__netif_tx_lock_bh(&dev->tx_queue);
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	return __netif_tx_trylock(&dev->tx_queue);
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	__netif_tx_unlock(&dev->tx_queue);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	__netif_tx_unlock_bh(&dev->tx_queue);
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
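
/*
 * Illustrative sketch of the usual driver lifecycle around these
 * helpers; error handling trimmed, all "my_*" names hypothetical.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->hard_start_xmit = my_start_xmit;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 *	...
 *	(on removal:)
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */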
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */