netdevice.h 47 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the Interfaces handler.
  7. *
  8. * Version: @(#)dev.h 1.0.10 08/12/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  13. * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
  14. * Alan Cox, <Alan.Cox@linux.org>
  15. * Bjorn Ekwall. <bj0rn@blox.se>
  16. * Pekka Riikonen <priikone@poseidon.pspt.fi>
  17. *
  18. * This program is free software; you can redistribute it and/or
  19. * modify it under the terms of the GNU General Public License
  20. * as published by the Free Software Foundation; either version
  21. * 2 of the License, or (at your option) any later version.
  22. *
  23. * Moved to /usr/include/linux for NET3
  24. */
  25. #ifndef _LINUX_NETDEVICE_H
  26. #define _LINUX_NETDEVICE_H
  27. #include <linux/if.h>
  28. #include <linux/if_ether.h>
  29. #include <linux/if_packet.h>
  30. #ifdef __KERNEL__
  31. #include <linux/timer.h>
  32. #include <linux/delay.h>
  33. #include <asm/atomic.h>
  34. #include <asm/cache.h>
  35. #include <asm/byteorder.h>
  36. #include <linux/device.h>
  37. #include <linux/percpu.h>
  38. #include <linux/dmaengine.h>
  39. #include <linux/workqueue.h>
  40. #include <net/net_namespace.h>
  41. struct vlan_group;
  42. struct ethtool_ops;
  43. struct netpoll_info;
  44. /* 802.11 specific */
  45. struct wireless_dev;
  46. /* source back-compat hooks */
  47. #define SET_ETHTOOL_OPS(netdev,ops) \
  48. ( (netdev)->ethtool_ops = (ops) )
  49. #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
  50. functions are available. */
  51. #define HAVE_FREE_NETDEV /* free_netdev() */
  52. #define HAVE_NETDEV_PRIV /* netdev_priv() */
  53. #define NET_XMIT_SUCCESS 0
  54. #define NET_XMIT_DROP 1 /* skb dropped */
  55. #define NET_XMIT_CN 2 /* congestion notification */
  56. #define NET_XMIT_POLICED 3 /* skb is shot by police */
  57. #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue;
  58. (TC use only - dev_queue_xmit
  59. returns this as NET_XMIT_SUCCESS) */
  60. /* Backlog congestion levels */
  61. #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
  62. #define NET_RX_DROP 1 /* packet dropped */
  63. #define NET_RX_CN_LOW 2 /* storm alert, just in case */
  64. #define NET_RX_CN_MOD 3 /* Storm on its way! */
  65. #define NET_RX_CN_HIGH 4 /* The storm is here */
  66. #define NET_RX_BAD 5 /* packet dropped due to kernel error */
  67. /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  68. * indicates that the device will soon be dropping packets, or already drops
  69. * some packets of the same priority; prompting us to send less aggressively. */
  70. #define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
  71. #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
  72. #endif
  73. #define MAX_ADDR_LEN 32 /* Largest hardware address length */
  74. /* Driver transmit return codes */
  75. #define NETDEV_TX_OK 0 /* driver took care of packet */
  76. #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
  77. #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
  78. #ifdef __KERNEL__
  79. /*
  80. * Compute the worst case header length according to the protocols
  81. * used.
  82. */
  83. #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
  84. # if defined(CONFIG_MAC80211_MESH)
  85. # define LL_MAX_HEADER 128
  86. # else
  87. # define LL_MAX_HEADER 96
  88. # endif
  89. #elif defined(CONFIG_TR)
  90. # define LL_MAX_HEADER 48
  91. #else
  92. # define LL_MAX_HEADER 32
  93. #endif
  94. #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
  95. !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
  96. !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
  97. !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
  98. #define MAX_HEADER LL_MAX_HEADER
  99. #else
  100. #define MAX_HEADER (LL_MAX_HEADER + 48)
  101. #endif
  102. #endif /* __KERNEL__ */
  103. struct net_device_subqueue
  104. {
  105. /* Give a control state for each queue. This struct may contain
  106. * per-queue locks in the future.
  107. */
  108. unsigned long state;
  109. };
  110. /*
  111. * Network device statistics. Akin to the 2.0 ether stats but
  112. * with byte counters.
  113. */
  114. struct net_device_stats
  115. {
  116. unsigned long rx_packets; /* total packets received */
  117. unsigned long tx_packets; /* total packets transmitted */
  118. unsigned long rx_bytes; /* total bytes received */
  119. unsigned long tx_bytes; /* total bytes transmitted */
  120. unsigned long rx_errors; /* bad packets received */
  121. unsigned long tx_errors; /* packet transmit problems */
  122. unsigned long rx_dropped; /* no space in linux buffers */
  123. unsigned long tx_dropped; /* no space available in linux */
  124. unsigned long multicast; /* multicast packets received */
  125. unsigned long collisions;
  126. /* detailed rx_errors: */
  127. unsigned long rx_length_errors;
  128. unsigned long rx_over_errors; /* receiver ring buff overflow */
  129. unsigned long rx_crc_errors; /* recved pkt with crc error */
  130. unsigned long rx_frame_errors; /* recv'd frame alignment error */
  131. unsigned long rx_fifo_errors; /* recv'r fifo overrun */
  132. unsigned long rx_missed_errors; /* receiver missed packet */
  133. /* detailed tx_errors */
  134. unsigned long tx_aborted_errors;
  135. unsigned long tx_carrier_errors;
  136. unsigned long tx_fifo_errors;
  137. unsigned long tx_heartbeat_errors;
  138. unsigned long tx_window_errors;
  139. /* for cslip etc */
  140. unsigned long rx_compressed;
  141. unsigned long tx_compressed;
  142. };
  143. /* Media selection options. */
  144. enum {
  145. IF_PORT_UNKNOWN = 0,
  146. IF_PORT_10BASE2,
  147. IF_PORT_10BASET,
  148. IF_PORT_AUI,
  149. IF_PORT_100BASET,
  150. IF_PORT_100BASETX,
  151. IF_PORT_100BASEFX
  152. };
  153. #ifdef __KERNEL__
  154. #include <linux/cache.h>
  155. #include <linux/skbuff.h>
  156. struct neighbour;
  157. struct neigh_parms;
  158. struct sk_buff;
  159. struct netif_rx_stats
  160. {
  161. unsigned total;
  162. unsigned dropped;
  163. unsigned time_squeeze;
  164. unsigned cpu_collision;
  165. };
  166. DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
  167. struct dev_addr_list
  168. {
  169. struct dev_addr_list *next;
  170. u8 da_addr[MAX_ADDR_LEN];
  171. u8 da_addrlen;
  172. u8 da_synced;
  173. int da_users;
  174. int da_gusers;
  175. };
  176. /*
  177. * We tag multicasts with these structures.
  178. */
  179. #define dev_mc_list dev_addr_list
  180. #define dmi_addr da_addr
  181. #define dmi_addrlen da_addrlen
  182. #define dmi_users da_users
  183. #define dmi_gusers da_gusers
  184. struct hh_cache
  185. {
  186. struct hh_cache *hh_next; /* Next entry */
  187. atomic_t hh_refcnt; /* number of users */
  188. /*
  189. * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
  190. * cache line on SMP.
  191. * They are mostly read, but hh_refcnt may be changed quite frequently,
  192. * incurring cache line ping pongs.
  193. */
  194. __be16 hh_type ____cacheline_aligned_in_smp;
  195. /* protocol identifier, f.e ETH_P_IP
  196. * NOTE: For VLANs, this will be the
  197. * encapuslated type. --BLG
  198. */
  199. u16 hh_len; /* length of header */
  200. int (*hh_output)(struct sk_buff *skb);
  201. seqlock_t hh_lock;
  202. /* cached hardware header; allow for machine alignment needs. */
  203. #define HH_DATA_MOD 16
  204. #define HH_DATA_OFF(__len) \
  205. (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  206. #define HH_DATA_ALIGN(__len) \
  207. (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  208. unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  209. };
  210. /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  211. * Alternative is:
  212. * dev->hard_header_len ? (dev->hard_header_len +
  213. * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  214. *
  215. * We could use other alignment values, but we must maintain the
  216. * relationship HH alignment <= LL alignment.
  217. *
  218. * LL_ALLOCATED_SPACE also takes into account the tailroom the device
  219. * may need.
  220. */
  221. #define LL_RESERVED_SPACE(dev) \
  222. ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  223. #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  224. ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  225. #define LL_ALLOCATED_SPACE(dev) \
  226. ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  227. struct header_ops {
  228. int (*create) (struct sk_buff *skb, struct net_device *dev,
  229. unsigned short type, const void *daddr,
  230. const void *saddr, unsigned len);
  231. int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
  232. int (*rebuild)(struct sk_buff *skb);
  233. #define HAVE_HEADER_CACHE
  234. int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
  235. void (*cache_update)(struct hh_cache *hh,
  236. const struct net_device *dev,
  237. const unsigned char *haddr);
  238. };
  239. /* These flag bits are private to the generic network queueing
  240. * layer, they may not be explicitly referenced by any other
  241. * code.
  242. */
  243. enum netdev_state_t
  244. {
  245. __LINK_STATE_START,
  246. __LINK_STATE_PRESENT,
  247. __LINK_STATE_SCHED,
  248. __LINK_STATE_NOCARRIER,
  249. __LINK_STATE_LINKWATCH_PENDING,
  250. __LINK_STATE_DORMANT,
  251. };
  252. /*
  253. * This structure holds at boot time configured netdevice settings. They
  254. * are then used in the device probing.
  255. */
  256. struct netdev_boot_setup {
  257. char name[IFNAMSIZ];
  258. struct ifmap map;
  259. };
  260. #define NETDEV_BOOT_SETUP_MAX 8
  261. extern int __init netdev_boot_setup(char *str);
  262. /*
  263. * Structure for NAPI scheduling similar to tasklet but with weighting
  264. */
  265. struct napi_struct {
  266. /* The poll_list must only be managed by the entity which
  267. * changes the state of the NAPI_STATE_SCHED bit. This means
  268. * whoever atomically sets that bit can add this napi_struct
  269. * to the per-cpu poll_list, and whoever clears that bit
  270. * can remove from the list right before clearing the bit.
  271. */
  272. struct list_head poll_list;
  273. unsigned long state;
  274. int weight;
  275. int (*poll)(struct napi_struct *, int);
  276. #ifdef CONFIG_NETPOLL
  277. spinlock_t poll_lock;
  278. int poll_owner;
  279. struct net_device *dev;
  280. struct list_head dev_list;
  281. #endif
  282. };
  283. enum
  284. {
  285. NAPI_STATE_SCHED, /* Poll is scheduled */
  286. NAPI_STATE_DISABLE, /* Disable pending */
  287. };
  288. extern void __napi_schedule(struct napi_struct *n);
  289. static inline int napi_disable_pending(struct napi_struct *n)
  290. {
  291. return test_bit(NAPI_STATE_DISABLE, &n->state);
  292. }
  293. /**
  294. * napi_schedule_prep - check if napi can be scheduled
  295. * @n: napi context
  296. *
  297. * Test if NAPI routine is already running, and if not mark
  298. * it as running. This is used as a condition variable
  299. * insure only one NAPI poll instance runs. We also make
  300. * sure there is no pending NAPI disable.
  301. */
  302. static inline int napi_schedule_prep(struct napi_struct *n)
  303. {
  304. return !napi_disable_pending(n) &&
  305. !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  306. }
  307. /**
  308. * napi_schedule - schedule NAPI poll
  309. * @n: napi context
  310. *
  311. * Schedule NAPI poll routine to be called if it is not already
  312. * running.
  313. */
  314. static inline void napi_schedule(struct napi_struct *n)
  315. {
  316. if (napi_schedule_prep(n))
  317. __napi_schedule(n);
  318. }
  319. /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
  320. static inline int napi_reschedule(struct napi_struct *napi)
  321. {
  322. if (napi_schedule_prep(napi)) {
  323. __napi_schedule(napi);
  324. return 1;
  325. }
  326. return 0;
  327. }
  328. /**
  329. * napi_complete - NAPI processing complete
  330. * @n: napi context
  331. *
  332. * Mark NAPI processing as complete.
  333. */
  334. static inline void __napi_complete(struct napi_struct *n)
  335. {
  336. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  337. list_del(&n->poll_list);
  338. smp_mb__before_clear_bit();
  339. clear_bit(NAPI_STATE_SCHED, &n->state);
  340. }
  341. static inline void napi_complete(struct napi_struct *n)
  342. {
  343. unsigned long flags;
  344. local_irq_save(flags);
  345. __napi_complete(n);
  346. local_irq_restore(flags);
  347. }
  348. /**
  349. * napi_disable - prevent NAPI from scheduling
  350. * @n: napi context
  351. *
  352. * Stop NAPI from being scheduled on this context.
  353. * Waits till any outstanding processing completes.
  354. */
  355. static inline void napi_disable(struct napi_struct *n)
  356. {
  357. set_bit(NAPI_STATE_DISABLE, &n->state);
  358. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  359. msleep(1);
  360. clear_bit(NAPI_STATE_DISABLE, &n->state);
  361. }
  362. /**
  363. * napi_enable - enable NAPI scheduling
  364. * @n: napi context
  365. *
  366. * Resume NAPI from being scheduled on this context.
  367. * Must be paired with napi_disable.
  368. */
  369. static inline void napi_enable(struct napi_struct *n)
  370. {
  371. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  372. smp_mb__before_clear_bit();
  373. clear_bit(NAPI_STATE_SCHED, &n->state);
  374. }
  375. #ifdef CONFIG_SMP
  376. /**
  377. * napi_synchronize - wait until NAPI is not running
  378. * @n: napi context
  379. *
  380. * Wait until NAPI is done being scheduled on this context.
  381. * Waits till any outstanding processing completes but
  382. * does not disable future activations.
  383. */
  384. static inline void napi_synchronize(const struct napi_struct *n)
  385. {
  386. while (test_bit(NAPI_STATE_SCHED, &n->state))
  387. msleep(1);
  388. }
  389. #else
  390. # define napi_synchronize(n) barrier()
  391. #endif
  392. enum netdev_queue_state_t
  393. {
  394. __QUEUE_STATE_XOFF,
  395. __QUEUE_STATE_QDISC_RUNNING,
  396. };
  397. struct netdev_queue {
  398. spinlock_t lock;
  399. struct net_device *dev;
  400. struct Qdisc *qdisc;
  401. unsigned long state;
  402. struct sk_buff *gso_skb;
  403. spinlock_t _xmit_lock;
  404. int xmit_lock_owner;
  405. struct Qdisc *qdisc_sleeping;
  406. struct list_head qdisc_list;
  407. struct netdev_queue *next_sched;
  408. };
  409. /*
  410. * The DEVICE structure.
  411. * Actually, this whole structure is a big mistake. It mixes I/O
  412. * data with strictly "high-level" data, and it has to know about
  413. * almost every data structure used in the INET module.
  414. *
  415. * FIXME: cleanup struct net_device such that network protocol info
  416. * moves out.
  417. */
  418. struct net_device
  419. {
  420. /*
  421. * This is the first field of the "visible" part of this structure
  422. * (i.e. as seen by users in the "Space.c" file). It is the name
  423. * the interface.
  424. */
  425. char name[IFNAMSIZ];
  426. /* device name hash chain */
  427. struct hlist_node name_hlist;
  428. /*
  429. * I/O specific fields
  430. * FIXME: Merge these and struct ifmap into one
  431. */
  432. unsigned long mem_end; /* shared mem end */
  433. unsigned long mem_start; /* shared mem start */
  434. unsigned long base_addr; /* device I/O address */
  435. unsigned int irq; /* device IRQ number */
  436. /*
  437. * Some hardware also needs these fields, but they are not
  438. * part of the usual set specified in Space.c.
  439. */
  440. unsigned char if_port; /* Selectable AUI, TP,..*/
  441. unsigned char dma; /* DMA channel */
  442. unsigned long state;
  443. struct list_head dev_list;
  444. #ifdef CONFIG_NETPOLL
  445. struct list_head napi_list;
  446. #endif
  447. /* The device initialization function. Called only once. */
  448. int (*init)(struct net_device *dev);
  449. /* ------- Fields preinitialized in Space.c finish here ------- */
  450. /* Net device features */
  451. unsigned long features;
  452. #define NETIF_F_SG 1 /* Scatter/gather IO. */
  453. #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
  454. #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
  455. #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
  456. #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
  457. #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
  458. #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
  459. #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
  460. #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
  461. #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
  462. #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
  463. #define NETIF_F_GSO 2048 /* Enable software GSO. */
  464. #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
  465. /* do not use LLTX in new drivers */
  466. #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
  467. #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
  468. #define NETIF_F_LRO 32768 /* large receive offload */
  469. /* Segmentation offload features */
  470. #define NETIF_F_GSO_SHIFT 16
  471. #define NETIF_F_GSO_MASK 0xffff0000
  472. #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  473. #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
  474. #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  475. #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
  476. #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
  477. /* List of features with software fallbacks. */
  478. #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
  479. #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  480. #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
  481. #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
  482. #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
  483. /* Interface index. Unique device identifier */
  484. int ifindex;
  485. int iflink;
  486. struct net_device_stats* (*get_stats)(struct net_device *dev);
  487. struct net_device_stats stats;
  488. #ifdef CONFIG_WIRELESS_EXT
  489. /* List of functions to handle Wireless Extensions (instead of ioctl).
  490. * See <net/iw_handler.h> for details. Jean II */
  491. const struct iw_handler_def * wireless_handlers;
  492. /* Instance data managed by the core of Wireless Extensions. */
  493. struct iw_public_data * wireless_data;
  494. #endif
  495. const struct ethtool_ops *ethtool_ops;
  496. /* Hardware header description */
  497. const struct header_ops *header_ops;
  498. /*
  499. * This marks the end of the "visible" part of the structure. All
  500. * fields hereafter are internal to the system, and may change at
  501. * will (read: may be cleaned up at will).
  502. */
  503. unsigned int flags; /* interface flags (a la BSD) */
  504. unsigned short gflags;
  505. unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
  506. unsigned short padded; /* How much padding added by alloc_netdev() */
  507. unsigned char operstate; /* RFC2863 operstate */
  508. unsigned char link_mode; /* mapping policy to operstate */
  509. unsigned mtu; /* interface MTU value */
  510. unsigned short type; /* interface hardware type */
  511. unsigned short hard_header_len; /* hardware hdr length */
  512. /* extra head- and tailroom the hardware may need, but not in all cases
  513. * can this be guaranteed, especially tailroom. Some cases also use
  514. * LL_MAX_HEADER instead to allocate the skb.
  515. */
  516. unsigned short needed_headroom;
  517. unsigned short needed_tailroom;
  518. struct net_device *master; /* Pointer to master device of a group,
  519. * which this device is member of.
  520. */
  521. /* Interface address info. */
  522. unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
  523. unsigned char addr_len; /* hardware address length */
  524. unsigned short dev_id; /* for shared network cards */
  525. spinlock_t addr_list_lock;
  526. struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
  527. int uc_count; /* Number of installed ucasts */
  528. int uc_promisc;
  529. struct dev_addr_list *mc_list; /* Multicast mac addresses */
  530. int mc_count; /* Number of installed mcasts */
  531. unsigned int promiscuity;
  532. unsigned int allmulti;
  533. /* Protocol specific pointers */
  534. void *atalk_ptr; /* AppleTalk link */
  535. void *ip_ptr; /* IPv4 specific data */
  536. void *dn_ptr; /* DECnet specific data */
  537. void *ip6_ptr; /* IPv6 specific data */
  538. void *ec_ptr; /* Econet specific data */
  539. void *ax25_ptr; /* AX.25 specific data */
  540. struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
  541. assign before registering */
  542. /*
  543. * Cache line mostly used on receive path (including eth_type_trans())
  544. */
  545. unsigned long last_rx; /* Time of last Rx */
  546. /* Interface address info used in eth_type_trans() */
  547. unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
  548. because most packets are unicast) */
  549. unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
  550. struct netdev_queue rx_queue;
  551. struct netdev_queue tx_queue ____cacheline_aligned_in_smp;
  552. unsigned long tx_queue_len; /* Max frames per queue allowed */
  553. /*
  554. * One part is mostly used on xmit path (device)
  555. */
  556. void *priv; /* pointer to private data */
  557. int (*hard_start_xmit) (struct sk_buff *skb,
  558. struct net_device *dev);
  559. /* These may be needed for future network-power-down code. */
  560. unsigned long trans_start; /* Time (in jiffies) of last Tx */
  561. int watchdog_timeo; /* used by dev_watchdog() */
  562. struct timer_list watchdog_timer;
  563. /*
  564. * refcnt is a very hot point, so align it on SMP
  565. */
  566. /* Number of references to this device */
  567. atomic_t refcnt ____cacheline_aligned_in_smp;
  568. /* delayed register/unregister */
  569. struct list_head todo_list;
  570. /* device index hash chain */
  571. struct hlist_node index_hlist;
  572. struct net_device *link_watch_next;
  573. /* register/unregister state machine */
  574. enum { NETREG_UNINITIALIZED=0,
  575. NETREG_REGISTERED, /* completed register_netdevice */
  576. NETREG_UNREGISTERING, /* called unregister_netdevice */
  577. NETREG_UNREGISTERED, /* completed unregister todo */
  578. NETREG_RELEASED, /* called free_netdev */
  579. } reg_state;
  580. /* Called after device is detached from network. */
  581. void (*uninit)(struct net_device *dev);
  582. /* Called after last user reference disappears. */
  583. void (*destructor)(struct net_device *dev);
  584. /* Pointers to interface service routines. */
  585. int (*open)(struct net_device *dev);
  586. int (*stop)(struct net_device *dev);
  587. #define HAVE_NETDEV_POLL
  588. #define HAVE_CHANGE_RX_FLAGS
  589. void (*change_rx_flags)(struct net_device *dev,
  590. int flags);
  591. #define HAVE_SET_RX_MODE
  592. void (*set_rx_mode)(struct net_device *dev);
  593. #define HAVE_MULTICAST
  594. void (*set_multicast_list)(struct net_device *dev);
  595. #define HAVE_SET_MAC_ADDR
  596. int (*set_mac_address)(struct net_device *dev,
  597. void *addr);
  598. #define HAVE_VALIDATE_ADDR
  599. int (*validate_addr)(struct net_device *dev);
  600. #define HAVE_PRIVATE_IOCTL
  601. int (*do_ioctl)(struct net_device *dev,
  602. struct ifreq *ifr, int cmd);
  603. #define HAVE_SET_CONFIG
  604. int (*set_config)(struct net_device *dev,
  605. struct ifmap *map);
  606. #define HAVE_CHANGE_MTU
  607. int (*change_mtu)(struct net_device *dev, int new_mtu);
  608. #define HAVE_TX_TIMEOUT
  609. void (*tx_timeout) (struct net_device *dev);
  610. void (*vlan_rx_register)(struct net_device *dev,
  611. struct vlan_group *grp);
  612. void (*vlan_rx_add_vid)(struct net_device *dev,
  613. unsigned short vid);
  614. void (*vlan_rx_kill_vid)(struct net_device *dev,
  615. unsigned short vid);
  616. int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
  617. #ifdef CONFIG_NETPOLL
  618. struct netpoll_info *npinfo;
  619. #endif
  620. #ifdef CONFIG_NET_POLL_CONTROLLER
  621. void (*poll_controller)(struct net_device *dev);
  622. #endif
  623. #ifdef CONFIG_NET_NS
  624. /* Network namespace this network device is inside */
  625. struct net *nd_net;
  626. #endif
  627. /* mid-layer private */
  628. void *ml_priv;
  629. /* bridge stuff */
  630. struct net_bridge_port *br_port;
  631. /* macvlan */
  632. struct macvlan_port *macvlan_port;
  633. /* GARP */
  634. struct garp_port *garp_port;
  635. /* class/net/name entry */
  636. struct device dev;
  637. /* space for optional statistics and wireless sysfs groups */
  638. struct attribute_group *sysfs_groups[3];
  639. /* rtnetlink link ops */
  640. const struct rtnl_link_ops *rtnl_link_ops;
  641. /* VLAN feature mask */
  642. unsigned long vlan_features;
  643. /* for setting kernel sock attribute on TCP connection setup */
  644. #define GSO_MAX_SIZE 65536
  645. unsigned int gso_max_size;
  646. /* The TX queue control structures */
  647. unsigned int egress_subqueue_count;
  648. struct net_device_subqueue egress_subqueue[1];
  649. };
  650. #define to_net_dev(d) container_of(d, struct net_device, dev)
  651. #define NETDEV_ALIGN 32
  652. #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
  653. /*
  654. * Net namespace inlines
  655. */
  656. static inline
  657. struct net *dev_net(const struct net_device *dev)
  658. {
  659. #ifdef CONFIG_NET_NS
  660. return dev->nd_net;
  661. #else
  662. return &init_net;
  663. #endif
  664. }
  665. static inline
  666. void dev_net_set(struct net_device *dev, struct net *net)
  667. {
  668. #ifdef CONFIG_NET_NS
  669. release_net(dev->nd_net);
  670. dev->nd_net = hold_net(net);
  671. #endif
  672. }
  673. /**
  674. * netdev_priv - access network device private data
  675. * @dev: network device
  676. *
  677. * Get network device private data
  678. */
  679. static inline void *netdev_priv(const struct net_device *dev)
  680. {
  681. return dev->priv;
  682. }
  683. /* Set the sysfs physical device reference for the network logical device
  684. * if set prior to registration will cause a symlink during initialization.
  685. */
  686. #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
  687. /**
  688. * netif_napi_add - initialize a napi context
  689. * @dev: network device
  690. * @napi: napi context
  691. * @poll: polling function
  692. * @weight: default weight
  693. *
  694. * netif_napi_add() must be used to initialize a napi context prior to calling
  695. * *any* of the other napi related functions.
  696. */
  697. static inline void netif_napi_add(struct net_device *dev,
  698. struct napi_struct *napi,
  699. int (*poll)(struct napi_struct *, int),
  700. int weight)
  701. {
  702. INIT_LIST_HEAD(&napi->poll_list);
  703. napi->poll = poll;
  704. napi->weight = weight;
  705. #ifdef CONFIG_NETPOLL
  706. napi->dev = dev;
  707. list_add(&napi->dev_list, &dev->napi_list);
  708. spin_lock_init(&napi->poll_lock);
  709. napi->poll_owner = -1;
  710. #endif
  711. set_bit(NAPI_STATE_SCHED, &napi->state);
  712. }
  713. /**
  714. * netif_napi_del - remove a napi context
  715. * @napi: napi context
  716. *
  717. * netif_napi_del() removes a napi context from the network device napi list
  718. */
  719. static inline void netif_napi_del(struct napi_struct *napi)
  720. {
  721. #ifdef CONFIG_NETPOLL
  722. list_del(&napi->dev_list);
  723. #endif
  724. }
  725. struct packet_type {
  726. __be16 type; /* This is really htons(ether_type). */
  727. struct net_device *dev; /* NULL is wildcarded here */
  728. int (*func) (struct sk_buff *,
  729. struct net_device *,
  730. struct packet_type *,
  731. struct net_device *);
  732. struct sk_buff *(*gso_segment)(struct sk_buff *skb,
  733. int features);
  734. int (*gso_send_check)(struct sk_buff *skb);
  735. void *af_packet_priv;
  736. struct list_head list;
  737. };
  738. #include <linux/interrupt.h>
  739. #include <linux/notifier.h>
  740. extern rwlock_t dev_base_lock; /* Device list lock */
  741. #define for_each_netdev(net, d) \
  742. list_for_each_entry(d, &(net)->dev_base_head, dev_list)
  743. #define for_each_netdev_safe(net, d, n) \
  744. list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
  745. #define for_each_netdev_continue(net, d) \
  746. list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
  747. #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
  748. static inline struct net_device *next_net_device(struct net_device *dev)
  749. {
  750. struct list_head *lh;
  751. struct net *net;
  752. net = dev_net(dev);
  753. lh = dev->dev_list.next;
  754. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  755. }
  756. static inline struct net_device *first_net_device(struct net *net)
  757. {
  758. return list_empty(&net->dev_base_head) ? NULL :
  759. net_device_entry(net->dev_base_head.next);
  760. }
  761. extern int netdev_boot_setup_check(struct net_device *dev);
  762. extern unsigned long netdev_boot_base(const char *prefix, int unit);
  763. extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
  764. extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
  765. extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
  766. extern void dev_add_pack(struct packet_type *pt);
  767. extern void dev_remove_pack(struct packet_type *pt);
  768. extern void __dev_remove_pack(struct packet_type *pt);
  769. extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
  770. unsigned short mask);
  771. extern struct net_device *dev_get_by_name(struct net *net, const char *name);
  772. extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
  773. extern int dev_alloc_name(struct net_device *dev, const char *name);
  774. extern int dev_open(struct net_device *dev);
  775. extern int dev_close(struct net_device *dev);
  776. extern void dev_disable_lro(struct net_device *dev);
  777. extern int dev_queue_xmit(struct sk_buff *skb);
  778. extern int register_netdevice(struct net_device *dev);
  779. extern void unregister_netdevice(struct net_device *dev);
  780. extern void free_netdev(struct net_device *dev);
  781. extern void synchronize_net(void);
  782. extern int register_netdevice_notifier(struct notifier_block *nb);
  783. extern int unregister_netdevice_notifier(struct notifier_block *nb);
  784. extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  785. extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
  786. extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  787. extern int dev_restart(struct net_device *dev);
  788. #ifdef CONFIG_NETPOLL_TRAP
  789. extern int netpoll_trap(void);
  790. #endif
  791. static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
  792. unsigned short type,
  793. const void *daddr, const void *saddr,
  794. unsigned len)
  795. {
  796. if (!dev->header_ops || !dev->header_ops->create)
  797. return 0;
  798. return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
  799. }
  800. static inline int dev_parse_header(const struct sk_buff *skb,
  801. unsigned char *haddr)
  802. {
  803. const struct net_device *dev = skb->dev;
  804. if (!dev->header_ops || !dev->header_ops->parse)
  805. return 0;
  806. return dev->header_ops->parse(skb, haddr);
  807. }
  808. typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  809. extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
  810. static inline int unregister_gifconf(unsigned int family)
  811. {
  812. return register_gifconf(family, NULL);
  813. }
  814. /*
  815. * Incoming packets are placed on per-cpu queues so that
  816. * no locking is needed.
  817. */
  818. struct softnet_data
  819. {
  820. struct netdev_queue *output_queue;
  821. struct sk_buff_head input_pkt_queue;
  822. struct list_head poll_list;
  823. struct sk_buff *completion_queue;
  824. struct napi_struct backlog;
  825. #ifdef CONFIG_NET_DMA
  826. struct dma_chan *net_dma;
  827. #endif
  828. };
  829. DECLARE_PER_CPU(struct softnet_data,softnet_data);
  830. #define HAVE_NETIF_QUEUE
  831. extern void __netif_schedule(struct netdev_queue *txq);
  832. static inline void netif_schedule_queue(struct netdev_queue *txq)
  833. {
  834. if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
  835. __netif_schedule(txq);
  836. }
  837. static inline void netif_schedule(struct net_device *dev)
  838. {
  839. netif_schedule_queue(&dev->tx_queue);
  840. }
  841. /**
  842. * netif_start_queue - allow transmit
  843. * @dev: network device
  844. *
  845. * Allow upper layers to call the device hard_start_xmit routine.
  846. */
  847. static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
  848. {
  849. clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  850. }
  851. static inline void netif_start_queue(struct net_device *dev)
  852. {
  853. netif_tx_start_queue(&dev->tx_queue);
  854. }
  855. /**
  856. * netif_wake_queue - restart transmit
  857. * @dev: network device
  858. *
  859. * Allow upper layers to call the device hard_start_xmit routine.
  860. * Used for flow control when transmit resources are available.
  861. */
  862. static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  863. {
  864. #ifdef CONFIG_NETPOLL_TRAP
  865. if (netpoll_trap()) {
  866. clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  867. return;
  868. }
  869. #endif
  870. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
  871. __netif_schedule(dev_queue);
  872. }
  873. static inline void netif_wake_queue(struct net_device *dev)
  874. {
  875. netif_tx_wake_queue(&dev->tx_queue);
  876. }
  877. /**
  878. * netif_stop_queue - stop transmitted packets
  879. * @dev: network device
  880. *
  881. * Stop upper layers calling the device hard_start_xmit routine.
  882. * Used for flow control when transmit resources are unavailable.
  883. */
  884. static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
  885. {
  886. set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  887. }
  888. static inline void netif_stop_queue(struct net_device *dev)
  889. {
  890. netif_tx_stop_queue(&dev->tx_queue);
  891. }
  892. /**
  893. * netif_queue_stopped - test if transmit queue is flowblocked
  894. * @dev: network device
  895. *
  896. * Test if transmit queue on device is currently unable to send.
  897. */
  898. static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  899. {
  900. return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  901. }
  902. static inline int netif_queue_stopped(const struct net_device *dev)
  903. {
  904. return netif_tx_queue_stopped(&dev->tx_queue);
  905. }
  906. /**
  907. * netif_running - test if up
  908. * @dev: network device
  909. *
  910. * Test if the device has been brought up.
  911. */
  912. static inline int netif_running(const struct net_device *dev)
  913. {
  914. return test_bit(__LINK_STATE_START, &dev->state);
  915. }
  916. /*
  917. * Routines to manage the subqueues on a device. We only need start
  918. * stop, and a check if it's stopped. All other device management is
  919. * done at the overall netdevice level.
  920. * Also test the device if we're multiqueue.
  921. */
  922. /**
  923. * netif_start_subqueue - allow sending packets on subqueue
  924. * @dev: network device
  925. * @queue_index: sub queue index
  926. *
  927. * Start individual transmit queue of a device with multiple transmit queues.
  928. */
  929. static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  930. {
  931. clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
  932. }
  933. /**
  934. * netif_stop_subqueue - stop sending packets on subqueue
  935. * @dev: network device
  936. * @queue_index: sub queue index
  937. *
  938. * Stop individual transmit queue of a device with multiple transmit queues.
  939. */
  940. static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  941. {
  942. #ifdef CONFIG_NETPOLL_TRAP
  943. if (netpoll_trap())
  944. return;
  945. #endif
  946. set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
  947. }
  948. /**
  949. * netif_subqueue_stopped - test status of subqueue
  950. * @dev: network device
  951. * @queue_index: sub queue index
  952. *
  953. * Check individual transmit queue of a device with multiple transmit queues.
  954. */
  955. static inline int __netif_subqueue_stopped(const struct net_device *dev,
  956. u16 queue_index)
  957. {
  958. return test_bit(__QUEUE_STATE_XOFF,
  959. &dev->egress_subqueue[queue_index].state);
  960. }
  961. static inline int netif_subqueue_stopped(const struct net_device *dev,
  962. struct sk_buff *skb)
  963. {
  964. return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
  965. }
  966. /**
  967. * netif_wake_subqueue - allow sending packets on subqueue
  968. * @dev: network device
  969. * @queue_index: sub queue index
  970. *
  971. * Resume individual transmit queue of a device with multiple transmit queues.
  972. */
  973. static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  974. {
  975. #ifdef CONFIG_NETPOLL_TRAP
  976. if (netpoll_trap())
  977. return;
  978. #endif
  979. if (test_and_clear_bit(__QUEUE_STATE_XOFF,
  980. &dev->egress_subqueue[queue_index].state))
  981. __netif_schedule(&dev->tx_queue);
  982. }
  983. /**
  984. * netif_is_multiqueue - test if device has multiple transmit queues
  985. * @dev: network device
  986. *
  987. * Check if device has multiple transmit queues
  988. * Always falls if NETDEVICE_MULTIQUEUE is not configured
  989. */
  990. static inline int netif_is_multiqueue(const struct net_device *dev)
  991. {
  992. return (!!(NETIF_F_MULTI_QUEUE & dev->features));
  993. }
  994. /* Use this variant when it is known for sure that it
  995. * is executing from hardware interrupt context or with hardware interrupts
  996. * disabled.
  997. */
  998. extern void dev_kfree_skb_irq(struct sk_buff *skb);
  999. /* Use this variant in places where it could be invoked
  1000. * from either hardware interrupt or other context, with hardware interrupts
  1001. * either disabled or enabled.
  1002. */
  1003. extern void dev_kfree_skb_any(struct sk_buff *skb);
  1004. #define HAVE_NETIF_RX 1
  1005. extern int netif_rx(struct sk_buff *skb);
  1006. extern int netif_rx_ni(struct sk_buff *skb);
  1007. #define HAVE_NETIF_RECEIVE_SKB 1
  1008. extern int netif_receive_skb(struct sk_buff *skb);
  1009. extern void netif_nit_deliver(struct sk_buff *skb);
  1010. extern int dev_valid_name(const char *name);
  1011. extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
  1012. extern int dev_ethtool(struct net *net, struct ifreq *);
  1013. extern unsigned dev_get_flags(const struct net_device *);
  1014. extern int dev_change_flags(struct net_device *, unsigned);
  1015. extern int dev_change_name(struct net_device *, char *);
  1016. extern int dev_change_net_namespace(struct net_device *,
  1017. struct net *, const char *);
  1018. extern int dev_set_mtu(struct net_device *, int);
  1019. extern int dev_set_mac_address(struct net_device *,
  1020. struct sockaddr *);
  1021. extern int dev_hard_start_xmit(struct sk_buff *skb,
  1022. struct net_device *dev);
  1023. extern int netdev_budget;
  1024. /* Called by rtnetlink.c:rtnl_unlock() */
  1025. extern void netdev_run_todo(void);
  1026. /**
  1027. * dev_put - release reference to device
  1028. * @dev: network device
  1029. *
  1030. * Release reference to device to allow it to be freed.
  1031. */
  1032. static inline void dev_put(struct net_device *dev)
  1033. {
  1034. atomic_dec(&dev->refcnt);
  1035. }
  1036. /**
  1037. * dev_hold - get reference to device
  1038. * @dev: network device
  1039. *
  1040. * Hold reference to device to keep it from being freed.
  1041. */
  1042. static inline void dev_hold(struct net_device *dev)
  1043. {
  1044. atomic_inc(&dev->refcnt);
  1045. }
  1046. /* Carrier loss detection, dial on demand. The functions netif_carrier_on
  1047. * and _off may be called from IRQ context, but it is caller
  1048. * who is responsible for serialization of these calls.
  1049. *
  1050. * The name carrier is inappropriate, these functions should really be
  1051. * called netif_lowerlayer_*() because they represent the state of any
  1052. * kind of lower layer not just hardware media.
  1053. */
  1054. extern void linkwatch_fire_event(struct net_device *dev);
  1055. /**
  1056. * netif_carrier_ok - test if carrier present
  1057. * @dev: network device
  1058. *
  1059. * Check if carrier is present on device
  1060. */
  1061. static inline int netif_carrier_ok(const struct net_device *dev)
  1062. {
  1063. return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
  1064. }
  1065. extern void __netdev_watchdog_up(struct net_device *dev);
  1066. extern void netif_carrier_on(struct net_device *dev);
  1067. extern void netif_carrier_off(struct net_device *dev);
  1068. /**
  1069. * netif_dormant_on - mark device as dormant.
  1070. * @dev: network device
  1071. *
  1072. * Mark device as dormant (as per RFC2863).
  1073. *
  1074. * The dormant state indicates that the relevant interface is not
  1075. * actually in a condition to pass packets (i.e., it is not 'up') but is
  1076. * in a "pending" state, waiting for some external event. For "on-
  1077. * demand" interfaces, this new state identifies the situation where the
  1078. * interface is waiting for events to place it in the up state.
  1079. *
  1080. */
  1081. static inline void netif_dormant_on(struct net_device *dev)
  1082. {
  1083. if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
  1084. linkwatch_fire_event(dev);
  1085. }
  1086. /**
  1087. * netif_dormant_off - set device as not dormant.
  1088. * @dev: network device
  1089. *
  1090. * Device is not in dormant state.
  1091. */
  1092. static inline void netif_dormant_off(struct net_device *dev)
  1093. {
  1094. if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
  1095. linkwatch_fire_event(dev);
  1096. }
  1097. /**
  1098. * netif_dormant - test if carrier present
  1099. * @dev: network device
  1100. *
  1101. * Check if carrier is present on device
  1102. */
  1103. static inline int netif_dormant(const struct net_device *dev)
  1104. {
  1105. return test_bit(__LINK_STATE_DORMANT, &dev->state);
  1106. }
  1107. /**
  1108. * netif_oper_up - test if device is operational
  1109. * @dev: network device
  1110. *
  1111. * Check if carrier is operational
  1112. */
  1113. static inline int netif_oper_up(const struct net_device *dev) {
  1114. return (dev->operstate == IF_OPER_UP ||
  1115. dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
  1116. }
  1117. /**
  1118. * netif_device_present - is device available or removed
  1119. * @dev: network device
  1120. *
  1121. * Check if device has not been removed from system.
  1122. */
  1123. static inline int netif_device_present(struct net_device *dev)
  1124. {
  1125. return test_bit(__LINK_STATE_PRESENT, &dev->state);
  1126. }
  1127. extern void netif_device_detach(struct net_device *dev);
  1128. extern void netif_device_attach(struct net_device *dev);
  1129. /*
  1130. * Network interface message level settings
  1131. */
  1132. #define HAVE_NETIF_MSG 1
  1133. enum {
  1134. NETIF_MSG_DRV = 0x0001,
  1135. NETIF_MSG_PROBE = 0x0002,
  1136. NETIF_MSG_LINK = 0x0004,
  1137. NETIF_MSG_TIMER = 0x0008,
  1138. NETIF_MSG_IFDOWN = 0x0010,
  1139. NETIF_MSG_IFUP = 0x0020,
  1140. NETIF_MSG_RX_ERR = 0x0040,
  1141. NETIF_MSG_TX_ERR = 0x0080,
  1142. NETIF_MSG_TX_QUEUED = 0x0100,
  1143. NETIF_MSG_INTR = 0x0200,
  1144. NETIF_MSG_TX_DONE = 0x0400,
  1145. NETIF_MSG_RX_STATUS = 0x0800,
  1146. NETIF_MSG_PKTDATA = 0x1000,
  1147. NETIF_MSG_HW = 0x2000,
  1148. NETIF_MSG_WOL = 0x4000,
  1149. };
  1150. #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
  1151. #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
  1152. #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
  1153. #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
  1154. #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
  1155. #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
  1156. #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
  1157. #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
  1158. #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
  1159. #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
  1160. #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
  1161. #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
  1162. #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
  1163. #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
  1164. #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
  1165. static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
  1166. {
  1167. /* use default */
  1168. if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
  1169. return default_msg_enable_bits;
  1170. if (debug_value == 0) /* no output */
  1171. return 0;
  1172. /* set low N bits */
  1173. return (1 << debug_value) - 1;
  1174. }
  1175. /* Test if receive needs to be scheduled but only if up */
  1176. static inline int netif_rx_schedule_prep(struct net_device *dev,
  1177. struct napi_struct *napi)
  1178. {
  1179. return napi_schedule_prep(napi);
  1180. }
  1181. /* Add interface to tail of rx poll list. This assumes that _prep has
  1182. * already been called and returned 1.
  1183. */
  1184. static inline void __netif_rx_schedule(struct net_device *dev,
  1185. struct napi_struct *napi)
  1186. {
  1187. __napi_schedule(napi);
  1188. }
  1189. /* Try to reschedule poll. Called by irq handler. */
  1190. static inline void netif_rx_schedule(struct net_device *dev,
  1191. struct napi_struct *napi)
  1192. {
  1193. if (netif_rx_schedule_prep(dev, napi))
  1194. __netif_rx_schedule(dev, napi);
  1195. }
  1196. /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
  1197. static inline int netif_rx_reschedule(struct net_device *dev,
  1198. struct napi_struct *napi)
  1199. {
  1200. if (napi_schedule_prep(napi)) {
  1201. __netif_rx_schedule(dev, napi);
  1202. return 1;
  1203. }
  1204. return 0;
  1205. }
  1206. /* same as netif_rx_complete, except that local_irq_save(flags)
  1207. * has already been issued
  1208. */
  1209. static inline void __netif_rx_complete(struct net_device *dev,
  1210. struct napi_struct *napi)
  1211. {
  1212. __napi_complete(napi);
  1213. }
  1214. /* Remove interface from poll list: it must be in the poll list
  1215. * on current cpu. This primitive is called by dev->poll(), when
  1216. * it completes the work. The device cannot be out of poll list at this
  1217. * moment, it is BUG().
  1218. */
  1219. static inline void netif_rx_complete(struct net_device *dev,
  1220. struct napi_struct *napi)
  1221. {
  1222. unsigned long flags;
  1223. local_irq_save(flags);
  1224. __netif_rx_complete(dev, napi);
  1225. local_irq_restore(flags);
  1226. }
  1227. /**
  1228. * netif_tx_lock - grab network device transmit lock
  1229. * @dev: network device
  1230. * @cpu: cpu number of lock owner
  1231. *
  1232. * Get network device transmit lock
  1233. */
  1234. static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
  1235. {
  1236. spin_lock(&txq->_xmit_lock);
  1237. txq->xmit_lock_owner = cpu;
  1238. }
  1239. static inline void netif_tx_lock(struct net_device *dev)
  1240. {
  1241. __netif_tx_lock(&dev->tx_queue, smp_processor_id());
  1242. }
  1243. static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
  1244. {
  1245. spin_lock_bh(&txq->_xmit_lock);
  1246. txq->xmit_lock_owner = smp_processor_id();
  1247. }
  1248. static inline void netif_tx_lock_bh(struct net_device *dev)
  1249. {
  1250. __netif_tx_lock_bh(&dev->tx_queue);
  1251. }
  1252. static inline int __netif_tx_trylock(struct netdev_queue *txq)
  1253. {
  1254. int ok = spin_trylock(&txq->_xmit_lock);
  1255. if (likely(ok))
  1256. txq->xmit_lock_owner = smp_processor_id();
  1257. return ok;
  1258. }
  1259. static inline int netif_tx_trylock(struct net_device *dev)
  1260. {
  1261. return __netif_tx_trylock(&dev->tx_queue);
  1262. }
  1263. static inline void __netif_tx_unlock(struct netdev_queue *txq)
  1264. {
  1265. txq->xmit_lock_owner = -1;
  1266. spin_unlock(&txq->_xmit_lock);
  1267. }
  1268. static inline void netif_tx_unlock(struct net_device *dev)
  1269. {
  1270. __netif_tx_unlock(&dev->tx_queue);
  1271. }
  1272. static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
  1273. {
  1274. txq->xmit_lock_owner = -1;
  1275. spin_unlock_bh(&txq->_xmit_lock);
  1276. }
  1277. static inline void netif_tx_unlock_bh(struct net_device *dev)
  1278. {
  1279. __netif_tx_unlock_bh(&dev->tx_queue);
  1280. }
  1281. #define HARD_TX_LOCK(dev, txq, cpu) { \
  1282. if ((dev->features & NETIF_F_LLTX) == 0) { \
  1283. __netif_tx_lock(txq, cpu); \
  1284. } \
  1285. }
  1286. #define HARD_TX_UNLOCK(dev, txq) { \
  1287. if ((dev->features & NETIF_F_LLTX) == 0) { \
  1288. __netif_tx_unlock(txq); \
  1289. } \
  1290. }
  1291. static inline void netif_tx_disable(struct net_device *dev)
  1292. {
  1293. netif_tx_lock_bh(dev);
  1294. netif_stop_queue(dev);
  1295. netif_tx_unlock_bh(dev);
  1296. }
  1297. /* These functions live elsewhere (drivers/net/net_init.c, but related) */
  1298. extern void ether_setup(struct net_device *dev);
  1299. /* Support for loadable net-drivers */
  1300. extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
  1301. void (*setup)(struct net_device *),
  1302. unsigned int queue_count);
  1303. #define alloc_netdev(sizeof_priv, name, setup) \
  1304. alloc_netdev_mq(sizeof_priv, name, setup, 1)
  1305. extern int register_netdev(struct net_device *dev);
  1306. extern void unregister_netdev(struct net_device *dev);
  1307. /* Functions used for secondary unicast and multicast support */
  1308. extern void dev_set_rx_mode(struct net_device *dev);
  1309. extern void __dev_set_rx_mode(struct net_device *dev);
  1310. extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
  1311. extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
  1312. extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
  1313. extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
  1314. extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
  1315. extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
  1316. extern int dev_mc_sync(struct net_device *to, struct net_device *from);
  1317. extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
  1318. extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
  1319. extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
  1320. extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
  1321. extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
  1322. extern int dev_set_promiscuity(struct net_device *dev, int inc);
  1323. extern int dev_set_allmulti(struct net_device *dev, int inc);
  1324. extern void netdev_state_change(struct net_device *dev);
  1325. extern void netdev_bonding_change(struct net_device *dev);
  1326. extern void netdev_features_change(struct net_device *dev);
  1327. /* Load a device via the kmod */
  1328. extern void dev_load(struct net *net, const char *name);
  1329. extern void dev_mcast_init(void);
  1330. extern int netdev_max_backlog;
  1331. extern int weight_p;
  1332. extern int netdev_set_master(struct net_device *dev, struct net_device *master);
  1333. extern int skb_checksum_help(struct sk_buff *skb);
  1334. extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
  1335. #ifdef CONFIG_BUG
  1336. extern void netdev_rx_csum_fault(struct net_device *dev);
  1337. #else
  1338. static inline void netdev_rx_csum_fault(struct net_device *dev)
  1339. {
  1340. }
  1341. #endif
  1342. /* rx skb timestamps */
  1343. extern void net_enable_timestamp(void);
  1344. extern void net_disable_timestamp(void);
  1345. #ifdef CONFIG_PROC_FS
  1346. extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
  1347. extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
  1348. extern void dev_seq_stop(struct seq_file *seq, void *v);
  1349. #endif
  1350. extern int netdev_class_create_file(struct class_attribute *class_attr);
  1351. extern void netdev_class_remove_file(struct class_attribute *class_attr);
  1352. extern void linkwatch_run_queue(void);
  1353. extern int netdev_compute_features(unsigned long all, unsigned long one);
  1354. static inline int net_gso_ok(int features, int gso_type)
  1355. {
  1356. int feature = gso_type << NETIF_F_GSO_SHIFT;
  1357. return (features & feature) == feature;
  1358. }
  1359. static inline int skb_gso_ok(struct sk_buff *skb, int features)
  1360. {
  1361. return net_gso_ok(features, skb_shinfo(skb)->gso_type);
  1362. }
  1363. static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
  1364. {
  1365. return skb_is_gso(skb) &&
  1366. (!skb_gso_ok(skb, dev->features) ||
  1367. unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
  1368. }
  1369. static inline void netif_set_gso_max_size(struct net_device *dev,
  1370. unsigned int size)
  1371. {
  1372. dev->gso_max_size = size;
  1373. }
  1374. /* On bonding slaves other than the currently active slave, suppress
  1375. * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
  1376. * ARP on active-backup slaves with arp_validate enabled.
  1377. */
  1378. static inline int skb_bond_should_drop(struct sk_buff *skb)
  1379. {
  1380. struct net_device *dev = skb->dev;
  1381. struct net_device *master = dev->master;
  1382. if (master &&
  1383. (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
  1384. if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
  1385. skb->protocol == __constant_htons(ETH_P_ARP))
  1386. return 0;
  1387. if (master->priv_flags & IFF_MASTER_ALB) {
  1388. if (skb->pkt_type != PACKET_BROADCAST &&
  1389. skb->pkt_type != PACKET_MULTICAST)
  1390. return 0;
  1391. }
  1392. if (master->priv_flags & IFF_MASTER_8023AD &&
  1393. skb->protocol == __constant_htons(ETH_P_SLOW))
  1394. return 0;
  1395. return 1;
  1396. }
  1397. return 0;
  1398. }
  1399. #endif /* __KERNEL__ */
  1400. #endif /* _LINUX_DEV_H */