netdevice.h
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the Interfaces handler.
  7. *
  8. * Version: @(#)dev.h 1.0.10 08/12/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  13. * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
  14. * Alan Cox, <Alan.Cox@linux.org>
  15. * Bjorn Ekwall. <bj0rn@blox.se>
  16. * Pekka Riikonen <priikone@poseidon.pspt.fi>
  17. *
  18. * This program is free software; you can redistribute it and/or
  19. * modify it under the terms of the GNU General Public License
  20. * as published by the Free Software Foundation; either version
  21. * 2 of the License, or (at your option) any later version.
  22. *
  23. * Moved to /usr/include/linux for NET3
  24. */
  25. #ifndef _LINUX_NETDEVICE_H
  26. #define _LINUX_NETDEVICE_H
  27. #include <linux/if.h>
  28. #include <linux/if_ether.h>
  29. #include <linux/if_packet.h>
  30. #ifdef __KERNEL__
  31. #include <linux/timer.h>
  32. #include <linux/delay.h>
  33. #include <asm/atomic.h>
  34. #include <asm/cache.h>
  35. #include <asm/byteorder.h>
  36. #include <linux/device.h>
  37. #include <linux/percpu.h>
  38. #include <linux/dmaengine.h>
  39. #include <linux/workqueue.h>
  40. #include <net/net_namespace.h>
  41. struct vlan_group;
  42. struct ethtool_ops;
  43. struct netpoll_info;
  44. /* 802.11 specific */
  45. struct wireless_dev;
  46. /* source back-compat hooks */
  47. #define SET_ETHTOOL_OPS(netdev,ops) \
  48. ( (netdev)->ethtool_ops = (ops) )
  49. #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
  50. functions are available. */
  51. #define HAVE_FREE_NETDEV /* free_netdev() */
  52. #define HAVE_NETDEV_PRIV /* netdev_priv() */
  53. #define NET_XMIT_SUCCESS 0
  54. #define NET_XMIT_DROP 1 /* skb dropped */
  55. #define NET_XMIT_CN 2 /* congestion notification */
  56. #define NET_XMIT_POLICED 3 /* skb is shot by police */
  57. #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue;
  58. (TC use only - dev_queue_xmit
  59. returns this as NET_XMIT_SUCCESS) */
  60. /* Backlog congestion levels */
  61. #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
  62. #define NET_RX_DROP 1 /* packet dropped */
  63. #define NET_RX_CN_LOW 2 /* storm alert, just in case */
  64. #define NET_RX_CN_MOD 3 /* Storm on its way! */
  65. #define NET_RX_CN_HIGH 4 /* The storm is here */
  66. #define NET_RX_BAD 5 /* packet dropped due to kernel error */
  67. /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  68. * indicates that the device will soon be dropping packets, or already drops
  69. * some packets of the same priority; prompting us to send less aggressively. */
  70. #define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
  71. #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
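/* Illustrative sketch (not part of this header): how a caller might fold the
 * NET_XMIT_* codes above into an errno. NET_XMIT_CN counts as success; any
 * other non-zero code is reported as -ENOBUFS via net_xmit_errno(). The
 * function name is made up; assumes <linux/errno.h> is available. */
static inline int example_xmit_status_to_errno(int ret)
{
	if (net_xmit_eval(ret) == 0)	/* NET_XMIT_SUCCESS or NET_XMIT_CN */
		return 0;
	return net_xmit_errno(ret);	/* packet was dropped: -ENOBUFS */
}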
  72. #endif
  73. #define MAX_ADDR_LEN 32 /* Largest hardware address length */
  74. /* Driver transmit return codes */
  75. #define NETDEV_TX_OK 0 /* driver took care of packet */
  76. #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
  77. #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
  78. #ifdef __KERNEL__
  79. /*
  80. * Compute the worst case header length according to the protocols
  81. * used.
  82. */
  83. #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
  84. # if defined(CONFIG_MAC80211_MESH)
  85. # define LL_MAX_HEADER 128
  86. # else
  87. # define LL_MAX_HEADER 96
  88. # endif
  89. #elif defined(CONFIG_TR)
  90. # define LL_MAX_HEADER 48
  91. #else
  92. # define LL_MAX_HEADER 32
  93. #endif
  94. #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
  95. !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
  96. !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
  97. !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
  98. #define MAX_HEADER LL_MAX_HEADER
  99. #else
  100. #define MAX_HEADER (LL_MAX_HEADER + 48)
  101. #endif
  102. #endif /* __KERNEL__ */
  103. /*
  104. * Network device statistics. Akin to the 2.0 ether stats but
  105. * with byte counters.
  106. */
  107. struct net_device_stats
  108. {
  109. unsigned long rx_packets; /* total packets received */
  110. unsigned long tx_packets; /* total packets transmitted */
  111. unsigned long rx_bytes; /* total bytes received */
  112. unsigned long tx_bytes; /* total bytes transmitted */
  113. unsigned long rx_errors; /* bad packets received */
  114. unsigned long tx_errors; /* packet transmit problems */
  115. unsigned long rx_dropped; /* no space in linux buffers */
  116. unsigned long tx_dropped; /* no space available in linux */
  117. unsigned long multicast; /* multicast packets received */
  118. unsigned long collisions;
  119. /* detailed rx_errors: */
  120. unsigned long rx_length_errors;
  121. unsigned long rx_over_errors; /* receiver ring buff overflow */
  122. unsigned long rx_crc_errors; /* received packet with CRC error */
  123. unsigned long rx_frame_errors; /* recv'd frame alignment error */
  124. unsigned long rx_fifo_errors; /* recv'r fifo overrun */
  125. unsigned long rx_missed_errors; /* receiver missed packet */
  126. /* detailed tx_errors */
  127. unsigned long tx_aborted_errors;
  128. unsigned long tx_carrier_errors;
  129. unsigned long tx_fifo_errors;
  130. unsigned long tx_heartbeat_errors;
  131. unsigned long tx_window_errors;
  132. /* for cslip etc */
  133. unsigned long rx_compressed;
  134. unsigned long tx_compressed;
  135. };
  136. /* Media selection options. */
  137. enum {
  138. IF_PORT_UNKNOWN = 0,
  139. IF_PORT_10BASE2,
  140. IF_PORT_10BASET,
  141. IF_PORT_AUI,
  142. IF_PORT_100BASET,
  143. IF_PORT_100BASETX,
  144. IF_PORT_100BASEFX
  145. };
  146. #ifdef __KERNEL__
  147. #include <linux/cache.h>
  148. #include <linux/skbuff.h>
  149. struct neighbour;
  150. struct neigh_parms;
  151. struct sk_buff;
  152. struct netif_rx_stats
  153. {
  154. unsigned total;
  155. unsigned dropped;
  156. unsigned time_squeeze;
  157. unsigned cpu_collision;
  158. };
  159. DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
  160. struct dev_addr_list
  161. {
  162. struct dev_addr_list *next;
  163. u8 da_addr[MAX_ADDR_LEN];
  164. u8 da_addrlen;
  165. u8 da_synced;
  166. int da_users;
  167. int da_gusers;
  168. };
  169. /*
  170. * We tag multicasts with these structures.
  171. */
  172. #define dev_mc_list dev_addr_list
  173. #define dmi_addr da_addr
  174. #define dmi_addrlen da_addrlen
  175. #define dmi_users da_users
  176. #define dmi_gusers da_gusers
  177. struct hh_cache
  178. {
  179. struct hh_cache *hh_next; /* Next entry */
  180. atomic_t hh_refcnt; /* number of users */
  181. /*
  182. * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
  183. * cache line on SMP.
  184. * They are mostly read, but hh_refcnt may be changed quite frequently,
  185. * incurring cache line ping pongs.
  186. */
  187. __be16 hh_type ____cacheline_aligned_in_smp;
  188. /* protocol identifier, e.g. ETH_P_IP
  189. * NOTE: For VLANs, this will be the
  190. * encapsulated type. --BLG
  191. */
  192. u16 hh_len; /* length of header */
  193. int (*hh_output)(struct sk_buff *skb);
  194. seqlock_t hh_lock;
  195. /* cached hardware header; allow for machine alignment needs. */
  196. #define HH_DATA_MOD 16
  197. #define HH_DATA_OFF(__len) \
  198. (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
  199. #define HH_DATA_ALIGN(__len) \
  200. (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
  201. unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
  202. };
  203. /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  204. * Alternative is:
  205. * dev->hard_header_len ? (dev->hard_header_len +
  206. * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
  207. *
  208. * We could use other alignment values, but we must maintain the
  209. * relationship HH alignment <= LL alignment.
  210. *
  211. * LL_ALLOCATED_SPACE also takes into account the tailroom the device
  212. * may need.
  213. */
  214. #define LL_RESERVED_SPACE(dev) \
  215. ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  216. #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
  217. ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  218. #define LL_ALLOCATED_SPACE(dev) \
  219. ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
  220. struct header_ops {
  221. int (*create) (struct sk_buff *skb, struct net_device *dev,
  222. unsigned short type, const void *daddr,
  223. const void *saddr, unsigned len);
  224. int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
  225. int (*rebuild)(struct sk_buff *skb);
  226. #define HAVE_HEADER_CACHE
  227. int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
  228. void (*cache_update)(struct hh_cache *hh,
  229. const struct net_device *dev,
  230. const unsigned char *haddr);
  231. };
  232. /* These flag bits are private to the generic network queueing
  233. * layer, they may not be explicitly referenced by any other
  234. * code.
  235. */
  236. enum netdev_state_t
  237. {
  238. __LINK_STATE_START,
  239. __LINK_STATE_PRESENT,
  240. __LINK_STATE_SCHED,
  241. __LINK_STATE_NOCARRIER,
  242. __LINK_STATE_LINKWATCH_PENDING,
  243. __LINK_STATE_DORMANT,
  244. };
  245. /*
  246. * This structure holds at boot time configured netdevice settings. They
  247. * are then used in the device probing.
  248. */
  249. struct netdev_boot_setup {
  250. char name[IFNAMSIZ];
  251. struct ifmap map;
  252. };
  253. #define NETDEV_BOOT_SETUP_MAX 8
  254. extern int __init netdev_boot_setup(char *str);
  255. /*
  256. * Structure for NAPI scheduling similar to tasklet but with weighting
  257. */
  258. struct napi_struct {
  259. /* The poll_list must only be managed by the entity which
  260. * changes the state of the NAPI_STATE_SCHED bit. This means
  261. * whoever atomically sets that bit can add this napi_struct
  262. * to the per-cpu poll_list, and whoever clears that bit
  263. * can remove from the list right before clearing the bit.
  264. */
  265. struct list_head poll_list;
  266. unsigned long state;
  267. int weight;
  268. int (*poll)(struct napi_struct *, int);
  269. #ifdef CONFIG_NETPOLL
  270. spinlock_t poll_lock;
  271. int poll_owner;
  272. struct net_device *dev;
  273. struct list_head dev_list;
  274. #endif
  275. };
  276. enum
  277. {
  278. NAPI_STATE_SCHED, /* Poll is scheduled */
  279. NAPI_STATE_DISABLE, /* Disable pending */
  280. };
  281. extern void __napi_schedule(struct napi_struct *n);
  282. static inline int napi_disable_pending(struct napi_struct *n)
  283. {
  284. return test_bit(NAPI_STATE_DISABLE, &n->state);
  285. }
  286. /**
  287. * napi_schedule_prep - check if napi can be scheduled
  288. * @n: napi context
  289. *
  290. * Test if NAPI routine is already running, and if not mark
  291. * it as running. This is used as a condition variable
  292. * insure only one NAPI poll instance runs. We also make
  293. * sure there is no pending NAPI disable.
  294. */
  295. static inline int napi_schedule_prep(struct napi_struct *n)
  296. {
  297. return !napi_disable_pending(n) &&
  298. !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
  299. }
  300. /**
  301. * napi_schedule - schedule NAPI poll
  302. * @n: napi context
  303. *
  304. * Schedule NAPI poll routine to be called if it is not already
  305. * running.
  306. */
  307. static inline void napi_schedule(struct napi_struct *n)
  308. {
  309. if (napi_schedule_prep(n))
  310. __napi_schedule(n);
  311. }
  312. /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
  313. static inline int napi_reschedule(struct napi_struct *napi)
  314. {
  315. if (napi_schedule_prep(napi)) {
  316. __napi_schedule(napi);
  317. return 1;
  318. }
  319. return 0;
  320. }
  321. /**
  322. * napi_complete - NAPI processing complete
  323. * @n: napi context
  324. *
  325. * Mark NAPI processing as complete.
  326. */
  327. static inline void __napi_complete(struct napi_struct *n)
  328. {
  329. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  330. list_del(&n->poll_list);
  331. smp_mb__before_clear_bit();
  332. clear_bit(NAPI_STATE_SCHED, &n->state);
  333. }
  334. static inline void napi_complete(struct napi_struct *n)
  335. {
  336. unsigned long flags;
  337. local_irq_save(flags);
  338. __napi_complete(n);
  339. local_irq_restore(flags);
  340. }
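/* Illustrative sketch (not part of this header): the usual shape of a driver
 * poll callback built on the helpers above. The interrupt handler masks the
 * device's RX interrupt and calls napi_schedule(); the poll routine processes
 * up to "budget" packets and calls napi_complete() when there is no more work.
 * "example_napi_poll" is a made-up name; RX processing and interrupt
 * re-enabling are device specific and only indicated by comments. */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... receive up to "budget" packets, incrementing work_done ... */

	if (work_done < budget) {
		/* all pending work handled: leave polling mode and
		 * re-enable the device's RX interrupt (not shown) */
		napi_complete(napi);
	}
	return work_done;
}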
  341. /**
  342. * napi_disable - prevent NAPI from scheduling
  343. * @n: napi context
  344. *
  345. * Stop NAPI from being scheduled on this context.
  346. * Waits till any outstanding processing completes.
  347. */
  348. static inline void napi_disable(struct napi_struct *n)
  349. {
  350. set_bit(NAPI_STATE_DISABLE, &n->state);
  351. while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
  352. msleep(1);
  353. clear_bit(NAPI_STATE_DISABLE, &n->state);
  354. }
  355. /**
  356. * napi_enable - enable NAPI scheduling
  357. * @n: napi context
  358. *
  359. * Allow NAPI to be scheduled on this context again.
  360. * Must be paired with napi_disable.
  361. */
  362. static inline void napi_enable(struct napi_struct *n)
  363. {
  364. BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
  365. smp_mb__before_clear_bit();
  366. clear_bit(NAPI_STATE_SCHED, &n->state);
  367. }
  368. #ifdef CONFIG_SMP
  369. /**
  370. * napi_synchronize - wait until NAPI is not running
  371. * @n: napi context
  372. *
  373. * Wait until NAPI is done being scheduled on this context.
  374. * Waits till any outstanding processing completes but
  375. * does not disable future activations.
  376. */
  377. static inline void napi_synchronize(const struct napi_struct *n)
  378. {
  379. while (test_bit(NAPI_STATE_SCHED, &n->state))
  380. msleep(1);
  381. }
  382. #else
  383. # define napi_synchronize(n) barrier()
  384. #endif
  385. enum netdev_queue_state_t
  386. {
  387. __QUEUE_STATE_XOFF,
  388. __QUEUE_STATE_QDISC_RUNNING,
  389. };
  390. struct netdev_queue {
  391. spinlock_t lock;
  392. struct net_device *dev;
  393. struct Qdisc *qdisc;
  394. unsigned long state;
  395. spinlock_t _xmit_lock;
  396. int xmit_lock_owner;
  397. struct Qdisc *qdisc_sleeping;
  398. struct list_head qdisc_list;
  399. struct netdev_queue *next_sched;
  400. } ____cacheline_aligned_in_smp;
  401. /*
  402. * The DEVICE structure.
  403. * Actually, this whole structure is a big mistake. It mixes I/O
  404. * data with strictly "high-level" data, and it has to know about
  405. * almost every data structure used in the INET module.
  406. *
  407. * FIXME: cleanup struct net_device such that network protocol info
  408. * moves out.
  409. */
  410. struct net_device
  411. {
  412. /*
  413. * This is the first field of the "visible" part of this structure
  414. * (i.e. as seen by users in the "Space.c" file). It is the name
  415. * of the interface.
  416. */
  417. char name[IFNAMSIZ];
  418. /* device name hash chain */
  419. struct hlist_node name_hlist;
  420. /*
  421. * I/O specific fields
  422. * FIXME: Merge these and struct ifmap into one
  423. */
  424. unsigned long mem_end; /* shared mem end */
  425. unsigned long mem_start; /* shared mem start */
  426. unsigned long base_addr; /* device I/O address */
  427. unsigned int irq; /* device IRQ number */
  428. /*
  429. * Some hardware also needs these fields, but they are not
  430. * part of the usual set specified in Space.c.
  431. */
  432. unsigned char if_port; /* Selectable AUI, TP,..*/
  433. unsigned char dma; /* DMA channel */
  434. unsigned long state;
  435. struct list_head dev_list;
  436. #ifdef CONFIG_NETPOLL
  437. struct list_head napi_list;
  438. #endif
  439. /* The device initialization function. Called only once. */
  440. int (*init)(struct net_device *dev);
  441. /* ------- Fields preinitialized in Space.c finish here ------- */
  442. /* Net device features */
  443. unsigned long features;
  444. #define NETIF_F_SG 1 /* Scatter/gather IO. */
  445. #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
  446. #define NETIF_F_NO_CSUM 4 /* Does not require checksum. E.g. loopback. */
  447. #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
  448. #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
  449. #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
  450. #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
  451. #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
  452. #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
  453. #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
  454. #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
  455. #define NETIF_F_GSO 2048 /* Enable software GSO. */
  456. #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
  457. /* do not use LLTX in new drivers */
  458. #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
  459. #define NETIF_F_LRO 32768 /* large receive offload */
  460. /* Segmentation offload features */
  461. #define NETIF_F_GSO_SHIFT 16
  462. #define NETIF_F_GSO_MASK 0xffff0000
  463. #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
  464. #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
  465. #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
  466. #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
  467. #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
  468. /* List of features with software fallbacks. */
  469. #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
  470. #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
  471. #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
  472. #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
  473. #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
  474. /* Interface index. Unique device identifier */
  475. int ifindex;
  476. int iflink;
  477. struct net_device_stats* (*get_stats)(struct net_device *dev);
  478. struct net_device_stats stats;
  479. #ifdef CONFIG_WIRELESS_EXT
  480. /* List of functions to handle Wireless Extensions (instead of ioctl).
  481. * See <net/iw_handler.h> for details. Jean II */
  482. const struct iw_handler_def * wireless_handlers;
  483. /* Instance data managed by the core of Wireless Extensions. */
  484. struct iw_public_data * wireless_data;
  485. #endif
  486. const struct ethtool_ops *ethtool_ops;
  487. /* Hardware header description */
  488. const struct header_ops *header_ops;
  489. /*
  490. * This marks the end of the "visible" part of the structure. All
  491. * fields hereafter are internal to the system, and may change at
  492. * will (read: may be cleaned up at will).
  493. */
  494. unsigned int flags; /* interface flags (a la BSD) */
  495. unsigned short gflags;
  496. unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
  497. unsigned short padded; /* How much padding added by alloc_netdev() */
  498. unsigned char operstate; /* RFC2863 operstate */
  499. unsigned char link_mode; /* mapping policy to operstate */
  500. unsigned mtu; /* interface MTU value */
  501. unsigned short type; /* interface hardware type */
  502. unsigned short hard_header_len; /* hardware hdr length */
  503. /* extra head- and tailroom the hardware may need, but not in all cases
  504. * can this be guaranteed, especially tailroom. Some cases also use
  505. * LL_MAX_HEADER instead to allocate the skb.
  506. */
  507. unsigned short needed_headroom;
  508. unsigned short needed_tailroom;
  509. struct net_device *master; /* Pointer to master device of a group,
  510. * which this device is member of.
  511. */
  512. /* Interface address info. */
  513. unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
  514. unsigned char addr_len; /* hardware address length */
  515. unsigned short dev_id; /* for shared network cards */
  516. spinlock_t addr_list_lock;
  517. struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
  518. int uc_count; /* Number of installed ucasts */
  519. int uc_promisc;
  520. struct dev_addr_list *mc_list; /* Multicast mac addresses */
  521. int mc_count; /* Number of installed mcasts */
  522. unsigned int promiscuity;
  523. unsigned int allmulti;
  524. /* Protocol specific pointers */
  525. void *atalk_ptr; /* AppleTalk link */
  526. void *ip_ptr; /* IPv4 specific data */
  527. void *dn_ptr; /* DECnet specific data */
  528. void *ip6_ptr; /* IPv6 specific data */
  529. void *ec_ptr; /* Econet specific data */
  530. void *ax25_ptr; /* AX.25 specific data */
  531. struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
  532. assign before registering */
  533. /*
  534. * Cache line mostly used on receive path (including eth_type_trans())
  535. */
  536. unsigned long last_rx; /* Time of last Rx */
  537. /* Interface address info used in eth_type_trans() */
  538. unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
  539. because most packets are unicast) */
  540. unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast address */
  541. struct netdev_queue rx_queue;
  542. struct netdev_queue *_tx ____cacheline_aligned_in_smp;
  543. /* Number of TX queues allocated at alloc_netdev_mq() time */
  544. unsigned int num_tx_queues;
  545. /* Number of TX queues currently active in device */
  546. unsigned int real_num_tx_queues;
  547. unsigned long tx_queue_len; /* Max frames per queue allowed */
  548. /*
  549. * One part is mostly used on xmit path (device)
  550. */
  551. void *priv; /* pointer to private data */
  552. int (*hard_start_xmit) (struct sk_buff *skb,
  553. struct net_device *dev);
  554. /* These may be needed for future network-power-down code. */
  555. unsigned long trans_start; /* Time (in jiffies) of last Tx */
  556. int watchdog_timeo; /* used by dev_watchdog() */
  557. struct timer_list watchdog_timer;
  558. /*
  559. * refcnt is a very hot point, so align it on SMP
  560. */
  561. /* Number of references to this device */
  562. atomic_t refcnt ____cacheline_aligned_in_smp;
  563. /* delayed register/unregister */
  564. struct list_head todo_list;
  565. /* device index hash chain */
  566. struct hlist_node index_hlist;
  567. struct net_device *link_watch_next;
  568. /* register/unregister state machine */
  569. enum { NETREG_UNINITIALIZED=0,
  570. NETREG_REGISTERED, /* completed register_netdevice */
  571. NETREG_UNREGISTERING, /* called unregister_netdevice */
  572. NETREG_UNREGISTERED, /* completed unregister todo */
  573. NETREG_RELEASED, /* called free_netdev */
  574. } reg_state;
  575. /* Called after device is detached from network. */
  576. void (*uninit)(struct net_device *dev);
  577. /* Called after last user reference disappears. */
  578. void (*destructor)(struct net_device *dev);
  579. /* Pointers to interface service routines. */
  580. int (*open)(struct net_device *dev);
  581. int (*stop)(struct net_device *dev);
  582. #define HAVE_NETDEV_POLL
  583. #define HAVE_CHANGE_RX_FLAGS
  584. void (*change_rx_flags)(struct net_device *dev,
  585. int flags);
  586. #define HAVE_SET_RX_MODE
  587. void (*set_rx_mode)(struct net_device *dev);
  588. #define HAVE_MULTICAST
  589. void (*set_multicast_list)(struct net_device *dev);
  590. #define HAVE_SET_MAC_ADDR
  591. int (*set_mac_address)(struct net_device *dev,
  592. void *addr);
  593. #define HAVE_VALIDATE_ADDR
  594. int (*validate_addr)(struct net_device *dev);
  595. #define HAVE_PRIVATE_IOCTL
  596. int (*do_ioctl)(struct net_device *dev,
  597. struct ifreq *ifr, int cmd);
  598. #define HAVE_SET_CONFIG
  599. int (*set_config)(struct net_device *dev,
  600. struct ifmap *map);
  601. #define HAVE_CHANGE_MTU
  602. int (*change_mtu)(struct net_device *dev, int new_mtu);
  603. #define HAVE_TX_TIMEOUT
  604. void (*tx_timeout) (struct net_device *dev);
  605. void (*vlan_rx_register)(struct net_device *dev,
  606. struct vlan_group *grp);
  607. void (*vlan_rx_add_vid)(struct net_device *dev,
  608. unsigned short vid);
  609. void (*vlan_rx_kill_vid)(struct net_device *dev,
  610. unsigned short vid);
  611. int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
  612. #ifdef CONFIG_NETPOLL
  613. struct netpoll_info *npinfo;
  614. #endif
  615. #ifdef CONFIG_NET_POLL_CONTROLLER
  616. void (*poll_controller)(struct net_device *dev);
  617. #endif
  618. u16 (*select_queue)(struct net_device *dev,
  619. struct sk_buff *skb);
  620. #ifdef CONFIG_NET_NS
  621. /* Network namespace this network device is inside */
  622. struct net *nd_net;
  623. #endif
  624. /* mid-layer private */
  625. void *ml_priv;
  626. /* bridge stuff */
  627. struct net_bridge_port *br_port;
  628. /* macvlan */
  629. struct macvlan_port *macvlan_port;
  630. /* GARP */
  631. struct garp_port *garp_port;
  632. /* class/net/name entry */
  633. struct device dev;
  634. /* space for optional statistics and wireless sysfs groups */
  635. struct attribute_group *sysfs_groups[3];
  636. /* rtnetlink link ops */
  637. const struct rtnl_link_ops *rtnl_link_ops;
  638. /* VLAN feature mask */
  639. unsigned long vlan_features;
  640. /* for setting kernel sock attribute on TCP connection setup */
  641. #define GSO_MAX_SIZE 65536
  642. unsigned int gso_max_size;
  643. };
  644. #define to_net_dev(d) container_of(d, struct net_device, dev)
  645. #define NETDEV_ALIGN 32
  646. #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
  647. static inline
  648. struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
  649. unsigned int index)
  650. {
  651. return &dev->_tx[index];
  652. }
  653. static inline void netdev_for_each_tx_queue(struct net_device *dev,
  654. void (*f)(struct net_device *,
  655. struct netdev_queue *,
  656. void *),
  657. void *arg)
  658. {
  659. unsigned int i;
  660. for (i = 0; i < dev->num_tx_queues; i++)
  661. f(dev, &dev->_tx[i], arg);
  662. }
  663. /*
  664. * Net namespace inlines
  665. */
  666. static inline
  667. struct net *dev_net(const struct net_device *dev)
  668. {
  669. #ifdef CONFIG_NET_NS
  670. return dev->nd_net;
  671. #else
  672. return &init_net;
  673. #endif
  674. }
  675. static inline
  676. void dev_net_set(struct net_device *dev, struct net *net)
  677. {
  678. #ifdef CONFIG_NET_NS
  679. release_net(dev->nd_net);
  680. dev->nd_net = hold_net(net);
  681. #endif
  682. }
  683. /**
  684. * netdev_priv - access network device private data
  685. * @dev: network device
  686. *
  687. * Get network device private data
  688. */
  689. static inline void *netdev_priv(const struct net_device *dev)
  690. {
  691. return (char *)dev + ((sizeof(struct net_device)
  692. + NETDEV_ALIGN_CONST)
  693. & ~NETDEV_ALIGN_CONST);
  694. }
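/* Illustrative sketch (not part of this header): accessing driver-private
 * state through netdev_priv(). "struct example_priv" is made up; the memory
 * it maps is the sizeof_priv area requested from alloc_netdev() further below. */
struct example_priv {
	u32 msg_enable;
	unsigned long tx_ring_full_events;
};

static inline struct example_priv *example_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}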
  695. /* Set the sysfs physical device reference for the network logical device.
  696. * If set prior to registration, this will cause a symlink during initialization.
  697. */
  698. #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
  699. /**
  700. * netif_napi_add - initialize a napi context
  701. * @dev: network device
  702. * @napi: napi context
  703. * @poll: polling function
  704. * @weight: default weight
  705. *
  706. * netif_napi_add() must be used to initialize a napi context prior to calling
  707. * *any* of the other napi related functions.
  708. */
  709. static inline void netif_napi_add(struct net_device *dev,
  710. struct napi_struct *napi,
  711. int (*poll)(struct napi_struct *, int),
  712. int weight)
  713. {
  714. INIT_LIST_HEAD(&napi->poll_list);
  715. napi->poll = poll;
  716. napi->weight = weight;
  717. #ifdef CONFIG_NETPOLL
  718. napi->dev = dev;
  719. list_add(&napi->dev_list, &dev->napi_list);
  720. spin_lock_init(&napi->poll_lock);
  721. napi->poll_owner = -1;
  722. #endif
  723. set_bit(NAPI_STATE_SCHED, &napi->state);
  724. }
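/* Illustrative sketch (not part of this header): wiring up a poll callback at
 * probe time. 64 is the conventional default weight; napi_enable() clears the
 * SCHED bit that netif_napi_add() leaves set, allowing the context to be
 * scheduled. "example_napi_setup" and the poll callback are made-up names. */
static inline void example_napi_setup(struct net_device *dev,
				      struct napi_struct *napi,
				      int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, poll, 64);
	napi_enable(napi);	/* often deferred to the device open routine */
}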
  725. /**
  726. * netif_napi_del - remove a napi context
  727. * @napi: napi context
  728. *
  729. * netif_napi_del() removes a napi context from the network device napi list
  730. */
  731. static inline void netif_napi_del(struct napi_struct *napi)
  732. {
  733. #ifdef CONFIG_NETPOLL
  734. list_del(&napi->dev_list);
  735. #endif
  736. }
  737. struct packet_type {
  738. __be16 type; /* This is really htons(ether_type). */
  739. struct net_device *dev; /* NULL is wildcarded here */
  740. int (*func) (struct sk_buff *,
  741. struct net_device *,
  742. struct packet_type *,
  743. struct net_device *);
  744. struct sk_buff *(*gso_segment)(struct sk_buff *skb,
  745. int features);
  746. int (*gso_send_check)(struct sk_buff *skb);
  747. void *af_packet_priv;
  748. struct list_head list;
  749. };
  750. #include <linux/interrupt.h>
  751. #include <linux/notifier.h>
  752. extern rwlock_t dev_base_lock; /* Device list lock */
  753. #define for_each_netdev(net, d) \
  754. list_for_each_entry(d, &(net)->dev_base_head, dev_list)
  755. #define for_each_netdev_safe(net, d, n) \
  756. list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
  757. #define for_each_netdev_continue(net, d) \
  758. list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
  759. #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
  760. static inline struct net_device *next_net_device(struct net_device *dev)
  761. {
  762. struct list_head *lh;
  763. struct net *net;
  764. net = dev_net(dev);
  765. lh = dev->dev_list.next;
  766. return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
  767. }
  768. static inline struct net_device *first_net_device(struct net *net)
  769. {
  770. return list_empty(&net->dev_base_head) ? NULL :
  771. net_device_entry(net->dev_base_head.next);
  772. }
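/* Illustrative sketch (not part of this header): walking a namespace's device
 * list with for_each_netdev() under dev_base_lock. The function name is made
 * up; IFF_UP comes from <linux/if.h>. */
static inline unsigned int example_count_up_devices(struct net *net)
{
	struct net_device *dev;
	unsigned int n = 0;

	read_lock(&dev_base_lock);
	for_each_netdev(net, dev)
		if (dev->flags & IFF_UP)
			n++;
	read_unlock(&dev_base_lock);
	return n;
}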
  773. extern int netdev_boot_setup_check(struct net_device *dev);
  774. extern unsigned long netdev_boot_base(const char *prefix, int unit);
  775. extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
  776. extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
  777. extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
  778. extern void dev_add_pack(struct packet_type *pt);
  779. extern void dev_remove_pack(struct packet_type *pt);
  780. extern void __dev_remove_pack(struct packet_type *pt);
  781. extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
  782. unsigned short mask);
  783. extern struct net_device *dev_get_by_name(struct net *net, const char *name);
  784. extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
  785. extern int dev_alloc_name(struct net_device *dev, const char *name);
  786. extern int dev_open(struct net_device *dev);
  787. extern int dev_close(struct net_device *dev);
  788. extern void dev_disable_lro(struct net_device *dev);
  789. extern int dev_queue_xmit(struct sk_buff *skb);
  790. extern int register_netdevice(struct net_device *dev);
  791. extern void unregister_netdevice(struct net_device *dev);
  792. extern void free_netdev(struct net_device *dev);
  793. extern void synchronize_net(void);
  794. extern int register_netdevice_notifier(struct notifier_block *nb);
  795. extern int unregister_netdevice_notifier(struct notifier_block *nb);
  796. extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
  797. extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
  798. extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
  799. extern int dev_restart(struct net_device *dev);
  800. #ifdef CONFIG_NETPOLL_TRAP
  801. extern int netpoll_trap(void);
  802. #endif
  803. static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
  804. unsigned short type,
  805. const void *daddr, const void *saddr,
  806. unsigned len)
  807. {
  808. if (!dev->header_ops || !dev->header_ops->create)
  809. return 0;
  810. return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
  811. }
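/* Illustrative sketch (not part of this header): allocating an output skb with
 * enough headroom for the link-layer header and filling that header in via
 * dev_hard_header(). alloc_skb()/skb_reserve()/skb_put()/kfree_skb() come from
 * <linux/skbuff.h>; the function name and GFP_ATOMIC choice are made up. */
static inline struct sk_buff *example_build_frame(struct net_device *dev,
						  const void *daddr,
						  unsigned short type,
						  unsigned int payload_len)
{
	struct sk_buff *skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len,
					GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom for the header */
	skb_put(skb, payload_len);			/* space for the payload */
	skb->dev = dev;
	if (dev_hard_header(skb, dev, type, daddr, dev->dev_addr,
			    payload_len) < 0) {
		kfree_skb(skb);
		return NULL;
	}
	return skb;
}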
  812. static inline int dev_parse_header(const struct sk_buff *skb,
  813. unsigned char *haddr)
  814. {
  815. const struct net_device *dev = skb->dev;
  816. if (!dev->header_ops || !dev->header_ops->parse)
  817. return 0;
  818. return dev->header_ops->parse(skb, haddr);
  819. }
  820. typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
  821. extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
  822. static inline int unregister_gifconf(unsigned int family)
  823. {
  824. return register_gifconf(family, NULL);
  825. }
  826. /*
  827. * Incoming packets are placed on per-cpu queues so that
  828. * no locking is needed.
  829. */
  830. struct softnet_data
  831. {
  832. struct netdev_queue *output_queue;
  833. struct sk_buff_head input_pkt_queue;
  834. struct list_head poll_list;
  835. struct sk_buff *completion_queue;
  836. struct napi_struct backlog;
  837. #ifdef CONFIG_NET_DMA
  838. struct dma_chan *net_dma;
  839. #endif
  840. };
  841. DECLARE_PER_CPU(struct softnet_data,softnet_data);
  842. #define HAVE_NETIF_QUEUE
  843. extern void __netif_schedule(struct netdev_queue *txq);
  844. static inline void netif_schedule_queue(struct netdev_queue *txq)
  845. {
  846. if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
  847. __netif_schedule(txq);
  848. }
  849. static inline void netif_tx_schedule_all(struct net_device *dev)
  850. {
  851. unsigned int i;
  852. for (i = 0; i < dev->num_tx_queues; i++)
  853. netif_schedule_queue(netdev_get_tx_queue(dev, i));
  854. }
  855. /**
  856. * netif_start_queue - allow transmit
  857. * @dev: network device
  858. *
  859. * Allow upper layers to call the device hard_start_xmit routine.
  860. */
  861. static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
  862. {
  863. clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  864. }
  865. static inline void netif_start_queue(struct net_device *dev)
  866. {
  867. netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
  868. }
  869. static inline void netif_tx_start_all_queues(struct net_device *dev)
  870. {
  871. unsigned int i;
  872. for (i = 0; i < dev->num_tx_queues; i++) {
  873. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  874. netif_tx_start_queue(txq);
  875. }
  876. }
  877. /**
  878. * netif_wake_queue - restart transmit
  879. * @dev: network device
  880. *
  881. * Allow upper layers to call the device hard_start_xmit routine.
  882. * Used for flow control when transmit resources are available.
  883. */
  884. static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  885. {
  886. #ifdef CONFIG_NETPOLL_TRAP
  887. if (netpoll_trap()) {
  888. clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  889. return;
  890. }
  891. #endif
  892. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
  893. __netif_schedule(dev_queue);
  894. }
  895. static inline void netif_wake_queue(struct net_device *dev)
  896. {
  897. netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
  898. }
  899. static inline void netif_tx_wake_all_queues(struct net_device *dev)
  900. {
  901. unsigned int i;
  902. for (i = 0; i < dev->num_tx_queues; i++) {
  903. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  904. netif_tx_wake_queue(txq);
  905. }
  906. }
  907. /**
  908. * netif_stop_queue - stop transmitting packets
  909. * @dev: network device
  910. *
  911. * Stop upper layers calling the device hard_start_xmit routine.
  912. * Used for flow control when transmit resources are unavailable.
  913. */
  914. static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
  915. {
  916. set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  917. }
  918. static inline void netif_stop_queue(struct net_device *dev)
  919. {
  920. netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
  921. }
  922. static inline void netif_tx_stop_all_queues(struct net_device *dev)
  923. {
  924. unsigned int i;
  925. for (i = 0; i < dev->num_tx_queues; i++) {
  926. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  927. netif_tx_stop_queue(txq);
  928. }
  929. }
  930. /**
  931. * netif_queue_stopped - test if transmit queue is flow-blocked
  932. * @dev: network device
  933. *
  934. * Test if transmit queue on device is currently unable to send.
  935. */
  936. static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
  937. {
  938. return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
  939. }
  940. static inline int netif_queue_stopped(const struct net_device *dev)
  941. {
  942. return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
  943. }
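/* Illustrative sketch (not part of this header): classic single-queue TX flow
 * control. A driver's hard_start_xmit stops the queue when its descriptor
 * ring fills up, and the TX-completion interrupt wakes it once entries are
 * reclaimed. "ring_full" and "ring_has_room" stand in for driver state. */
static inline void example_tx_flow_control(struct net_device *dev,
					   int ring_full, int ring_has_room)
{
	if (ring_full)
		netif_stop_queue(dev);		/* stack stops feeding us packets */
	else if (ring_has_room && netif_queue_stopped(dev))
		netif_wake_queue(dev);		/* resume transmission */
}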
  944. /**
  945. * netif_running - test if up
  946. * @dev: network device
  947. *
  948. * Test if the device has been brought up.
  949. */
  950. static inline int netif_running(const struct net_device *dev)
  951. {
  952. return test_bit(__LINK_STATE_START, &dev->state);
  953. }
  954. /*
  955. * Routines to manage the subqueues on a device. We only need start,
  956. * stop, and a check if it's stopped. All other device management is
  957. * done at the overall netdevice level.
  958. * Also a test for whether the device is multiqueue.
  959. */
  960. /**
  961. * netif_start_subqueue - allow sending packets on subqueue
  962. * @dev: network device
  963. * @queue_index: sub queue index
  964. *
  965. * Start individual transmit queue of a device with multiple transmit queues.
  966. */
  967. static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  968. {
  969. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  970. clear_bit(__QUEUE_STATE_XOFF, &txq->state);
  971. }
  972. /**
  973. * netif_stop_subqueue - stop sending packets on subqueue
  974. * @dev: network device
  975. * @queue_index: sub queue index
  976. *
  977. * Stop individual transmit queue of a device with multiple transmit queues.
  978. */
  979. static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
  980. {
  981. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  982. #ifdef CONFIG_NETPOLL_TRAP
  983. if (netpoll_trap())
  984. return;
  985. #endif
  986. set_bit(__QUEUE_STATE_XOFF, &txq->state);
  987. }
  988. /**
  989. * netif_subqueue_stopped - test status of subqueue
  990. * @dev: network device
  991. * @queue_index: sub queue index
  992. *
  993. * Check individual transmit queue of a device with multiple transmit queues.
  994. */
  995. static inline int __netif_subqueue_stopped(const struct net_device *dev,
  996. u16 queue_index)
  997. {
  998. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  999. return test_bit(__QUEUE_STATE_XOFF, &txq->state);
  1000. }
  1001. static inline int netif_subqueue_stopped(const struct net_device *dev,
  1002. struct sk_buff *skb)
  1003. {
  1004. return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
  1005. }
  1006. /**
  1007. * netif_wake_subqueue - allow sending packets on subqueue
  1008. * @dev: network device
  1009. * @queue_index: sub queue index
  1010. *
  1011. * Resume individual transmit queue of a device with multiple transmit queues.
  1012. */
  1013. static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  1014. {
  1015. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
  1016. #ifdef CONFIG_NETPOLL_TRAP
  1017. if (netpoll_trap())
  1018. return;
  1019. #endif
  1020. if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
  1021. __netif_schedule(txq);
  1022. }
  1023. /**
  1024. * netif_is_multiqueue - test if device has multiple transmit queues
  1025. * @dev: network device
  1026. *
  1027. * Check if device has multiple transmit queues
  1028. */
  1029. static inline int netif_is_multiqueue(const struct net_device *dev)
  1030. {
  1031. return (dev->num_tx_queues > 1);
  1032. }
  1033. /* Use this variant when it is known for sure that it
  1034. * is executing from hardware interrupt context or with hardware interrupts
  1035. * disabled.
  1036. */
  1037. extern void dev_kfree_skb_irq(struct sk_buff *skb);
  1038. /* Use this variant in places where it could be invoked
  1039. * from either hardware interrupt or other context, with hardware interrupts
  1040. * either disabled or enabled.
  1041. */
  1042. extern void dev_kfree_skb_any(struct sk_buff *skb);
  1043. #define HAVE_NETIF_RX 1
  1044. extern int netif_rx(struct sk_buff *skb);
  1045. extern int netif_rx_ni(struct sk_buff *skb);
  1046. #define HAVE_NETIF_RECEIVE_SKB 1
  1047. extern int netif_receive_skb(struct sk_buff *skb);
  1048. extern void netif_nit_deliver(struct sk_buff *skb);
  1049. extern int dev_valid_name(const char *name);
  1050. extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
  1051. extern int dev_ethtool(struct net *net, struct ifreq *);
  1052. extern unsigned dev_get_flags(const struct net_device *);
  1053. extern int dev_change_flags(struct net_device *, unsigned);
  1054. extern int dev_change_name(struct net_device *, char *);
  1055. extern int dev_change_net_namespace(struct net_device *,
  1056. struct net *, const char *);
  1057. extern int dev_set_mtu(struct net_device *, int);
  1058. extern int dev_set_mac_address(struct net_device *,
  1059. struct sockaddr *);
  1060. extern int dev_hard_start_xmit(struct sk_buff *skb,
  1061. struct net_device *dev,
  1062. struct netdev_queue *txq);
  1063. extern int netdev_budget;
  1064. /* Called by rtnetlink.c:rtnl_unlock() */
  1065. extern void netdev_run_todo(void);
  1066. /**
  1067. * dev_put - release reference to device
  1068. * @dev: network device
  1069. *
  1070. * Release reference to device to allow it to be freed.
  1071. */
  1072. static inline void dev_put(struct net_device *dev)
  1073. {
  1074. atomic_dec(&dev->refcnt);
  1075. }
  1076. /**
  1077. * dev_hold - get reference to device
  1078. * @dev: network device
  1079. *
  1080. * Hold reference to device to keep it from being freed.
  1081. */
  1082. static inline void dev_hold(struct net_device *dev)
  1083. {
  1084. atomic_inc(&dev->refcnt);
  1085. }
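/* Illustrative sketch (not part of this header): the refcounting pattern
 * around a device lookup. dev_get_by_index() (declared above) returns with a
 * reference held; the caller drops it with dev_put() when finished. The
 * function name is made up; assumes <linux/errno.h> for ENODEV. */
static inline int example_with_device(struct net *net, int ifindex)
{
	struct net_device *dev = dev_get_by_index(net, ifindex);

	if (!dev)
		return -ENODEV;
	/* ... use dev here; dev_hold()/dev_put() pairs protect longer-lived use ... */
	dev_put(dev);
	return 0;
}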
  1086. /* Carrier loss detection, dial on demand. The functions netif_carrier_on
  1087. * and _off may be called from IRQ context, but it is the caller
  1088. * who is responsible for serializing these calls.
  1089. *
  1090. * The name carrier is inappropriate, these functions should really be
  1091. * called netif_lowerlayer_*() because they represent the state of any
  1092. * kind of lower layer not just hardware media.
  1093. */
  1094. extern void linkwatch_fire_event(struct net_device *dev);
  1095. /**
  1096. * netif_carrier_ok - test if carrier present
  1097. * @dev: network device
  1098. *
  1099. * Check if carrier is present on device
  1100. */
  1101. static inline int netif_carrier_ok(const struct net_device *dev)
  1102. {
  1103. return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
  1104. }
  1105. extern void __netdev_watchdog_up(struct net_device *dev);
  1106. extern void netif_carrier_on(struct net_device *dev);
  1107. extern void netif_carrier_off(struct net_device *dev);
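/* Illustrative sketch (not part of this header): propagating a link change
 * detected by the hardware. netif_carrier_on()/netif_carrier_off() (declared
 * above) update __LINK_STATE_NOCARRIER and trigger the linkwatch machinery;
 * "link_up" would come from a device-specific status register. The function
 * name is made up. */
static inline void example_report_link(struct net_device *dev, int link_up)
{
	if (link_up && !netif_carrier_ok(dev))
		netif_carrier_on(dev);	/* also restarts the TX watchdog */
	else if (!link_up && netif_carrier_ok(dev))
		netif_carrier_off(dev);
}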
  1108. /**
  1109. * netif_dormant_on - mark device as dormant.
  1110. * @dev: network device
  1111. *
  1112. * Mark device as dormant (as per RFC2863).
  1113. *
  1114. * The dormant state indicates that the relevant interface is not
  1115. * actually in a condition to pass packets (i.e., it is not 'up') but is
  1116. * in a "pending" state, waiting for some external event. For "on-
  1117. * demand" interfaces, this new state identifies the situation where the
  1118. * interface is waiting for events to place it in the up state.
  1119. *
  1120. */
  1121. static inline void netif_dormant_on(struct net_device *dev)
  1122. {
  1123. if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
  1124. linkwatch_fire_event(dev);
  1125. }
  1126. /**
  1127. * netif_dormant_off - set device as not dormant.
  1128. * @dev: network device
  1129. *
  1130. * Device is not in dormant state.
  1131. */
  1132. static inline void netif_dormant_off(struct net_device *dev)
  1133. {
  1134. if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
  1135. linkwatch_fire_event(dev);
  1136. }
  1137. /**
  1138. * netif_dormant - test if device is dormant
  1139. * @dev: network device
  1140. *
  1141. * Check if the device is in the dormant state
  1142. */
  1143. static inline int netif_dormant(const struct net_device *dev)
  1144. {
  1145. return test_bit(__LINK_STATE_DORMANT, &dev->state);
  1146. }
  1147. /**
  1148. * netif_oper_up - test if device is operational
  1149. * @dev: network device
  1150. *
  1151. * Check if carrier is operational
  1152. */
  1153. static inline int netif_oper_up(const struct net_device *dev) {
  1154. return (dev->operstate == IF_OPER_UP ||
  1155. dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
  1156. }
  1157. /**
  1158. * netif_device_present - is device available or removed
  1159. * @dev: network device
  1160. *
  1161. * Check if device has not been removed from system.
  1162. */
  1163. static inline int netif_device_present(struct net_device *dev)
  1164. {
  1165. return test_bit(__LINK_STATE_PRESENT, &dev->state);
  1166. }
  1167. extern void netif_device_detach(struct net_device *dev);
  1168. extern void netif_device_attach(struct net_device *dev);
  1169. /*
  1170. * Network interface message level settings
  1171. */
  1172. #define HAVE_NETIF_MSG 1
  1173. enum {
  1174. NETIF_MSG_DRV = 0x0001,
  1175. NETIF_MSG_PROBE = 0x0002,
  1176. NETIF_MSG_LINK = 0x0004,
  1177. NETIF_MSG_TIMER = 0x0008,
  1178. NETIF_MSG_IFDOWN = 0x0010,
  1179. NETIF_MSG_IFUP = 0x0020,
  1180. NETIF_MSG_RX_ERR = 0x0040,
  1181. NETIF_MSG_TX_ERR = 0x0080,
  1182. NETIF_MSG_TX_QUEUED = 0x0100,
  1183. NETIF_MSG_INTR = 0x0200,
  1184. NETIF_MSG_TX_DONE = 0x0400,
  1185. NETIF_MSG_RX_STATUS = 0x0800,
  1186. NETIF_MSG_PKTDATA = 0x1000,
  1187. NETIF_MSG_HW = 0x2000,
  1188. NETIF_MSG_WOL = 0x4000,
  1189. };
  1190. #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
  1191. #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
  1192. #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
  1193. #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
  1194. #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
  1195. #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
  1196. #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
  1197. #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
  1198. #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
  1199. #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
  1200. #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
  1201. #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
  1202. #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
  1203. #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
  1204. #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
  1205. static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
  1206. {
  1207. /* use default */
  1208. if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
  1209. return default_msg_enable_bits;
  1210. if (debug_value == 0) /* no output */
  1211. return 0;
  1212. /* set low N bits */
  1213. return (1 << debug_value) - 1;
  1214. }
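/* Illustrative sketch (not part of this header): deriving a driver's
 * msg_enable word from a "debug" module parameter. Out-of-range values fall
 * back to a default set of message classes; the netif_msg_*() macros above
 * then gate individual printouts. The function name is made up. */
static inline u32 example_msg_level(int debug)
{
	return netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE |
				     NETIF_MSG_LINK);
}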
  1215. /* Test if receive needs to be scheduled but only if up */
  1216. static inline int netif_rx_schedule_prep(struct net_device *dev,
  1217. struct napi_struct *napi)
  1218. {
  1219. return napi_schedule_prep(napi);
  1220. }
  1221. /* Add interface to tail of rx poll list. This assumes that _prep has
  1222. * already been called and returned 1.
  1223. */
  1224. static inline void __netif_rx_schedule(struct net_device *dev,
  1225. struct napi_struct *napi)
  1226. {
  1227. __napi_schedule(napi);
  1228. }
  1229. /* Try to reschedule poll. Called by irq handler. */
  1230. static inline void netif_rx_schedule(struct net_device *dev,
  1231. struct napi_struct *napi)
  1232. {
  1233. if (netif_rx_schedule_prep(dev, napi))
  1234. __netif_rx_schedule(dev, napi);
  1235. }
  1236. /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
  1237. static inline int netif_rx_reschedule(struct net_device *dev,
  1238. struct napi_struct *napi)
  1239. {
  1240. if (napi_schedule_prep(napi)) {
  1241. __netif_rx_schedule(dev, napi);
  1242. return 1;
  1243. }
  1244. return 0;
  1245. }
  1246. /* same as netif_rx_complete, except that local_irq_save(flags)
  1247. * has already been issued
  1248. */
  1249. static inline void __netif_rx_complete(struct net_device *dev,
  1250. struct napi_struct *napi)
  1251. {
  1252. __napi_complete(napi);
  1253. }
  1254. /* Remove interface from poll list: it must be in the poll list
  1255. * on the current cpu. This primitive is called by dev->poll(), when
  1256. * it completes its work. The device must not be out of the poll list
  1257. * at this moment; that would be a BUG().
  1258. */
  1259. static inline void netif_rx_complete(struct net_device *dev,
  1260. struct napi_struct *napi)
  1261. {
  1262. unsigned long flags;
  1263. local_irq_save(flags);
  1264. __netif_rx_complete(dev, napi);
  1265. local_irq_restore(flags);
  1266. }
/**
 * __netif_tx_lock - grab network device transmit queue lock
 * @txq: network device transmit queue
 * @cpu: cpu number of lock owner
 *
 * Get the transmit queue lock and record the owning cpu.
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock(struct net_device *dev)
{
	int cpu = smp_processor_id();
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		__netif_tx_lock(txq, cpu);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}
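
/*
 * Example (not part of this header, illustrative sketch only): a driver
 * reset path can take every transmit queue lock with netif_tx_lock_bh() so
 * it does not race with the transmit routine, while HARD_TX_LOCK()/
 * HARD_TX_UNLOCK() above are what the core uses around a single queue,
 * honouring NETIF_F_LLTX.  struct foo_priv and foo_reinit_tx_ring() are
 * hypothetical.
 */
#if 0
static void foo_tx_timeout(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	netif_tx_lock_bh(dev);		/* serialize against xmit on all queues */
	foo_reinit_tx_ring(fp);
	netif_tx_unlock_bh(dev);

	netif_wake_queue(dev);		/* let the stack submit packets again */
}
#endif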
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;

	netif_tx_lock_bh(dev);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
	netif_tx_unlock_bh(dev);
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
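
/*
 * Example (not part of this header, illustrative sketch only):
 * dev->addr_list_lock protects the unicast and multicast address lists, so
 * a driver walking dev->mc_list outside of the set_rx_mode path would hold
 * it via netif_addr_lock_bh().  foo_write_hw_mc_filter() is hypothetical.
 */
#if 0
static void foo_resync_mc_filter(struct net_device *dev)
{
	struct dev_mc_list *mc;

	netif_addr_lock_bh(dev);
	for (mc = dev->mc_list; mc; mc = mc->next)
		foo_write_hw_mc_filter(dev, mc->dmi_addr);
	netif_addr_unlock_bh(dev);
}
#endif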
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
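
/*
 * Example (not part of this header, illustrative sketch only): the usual
 * life cycle of a loadable Ethernet driver built on the helpers above.
 * struct foo_priv is hypothetical and the hardware setup that would fill
 * in dev->dev_addr and the driver entry points is omitted.
 */
#if 0
static int foo_probe_one(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	/* initialise foo_priv, dev->dev_addr and the driver methods here */

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}

static void foo_remove_one(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}
#endif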
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
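
/*
 * Example (not part of this header, illustrative sketch only): the address
 * helpers above are meant for code layered on top of a device, e.g. a
 * protocol joining and leaving a link-layer multicast group.  The group
 * address below is arbitrary and the foo_* names are hypothetical;
 * dev_mc_add()/dev_mc_delete() take the address list lock themselves.
 */
#if 0
static const u8 foo_group_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };

static int foo_join_group(struct net_device *dev)
{
	return dev_mc_add(dev, (void *)foo_group_addr, ETH_ALEN, 0);
}

static void foo_leave_group(struct net_device *dev)
{
	dev_mc_delete(dev, (void *)foo_group_addr, ETH_ALEN, 0);
}
#endif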
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void		netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
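
/*
 * Example (not part of this header, illustrative sketch only): roughly how
 * a transmit path can use the helpers above - if the device cannot handle
 * this GSO packet in hardware, fall back to software segmentation with
 * skb_gso_segment().  foo_xmit_one() is hypothetical.
 */
#if 0
static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs = skb_gso_segment(skb, dev->features);

		if (IS_ERR(segs)) {
			kfree_skb(skb);		/* segmentation failed: drop */
			return 0;
		}

		/* transmit the resulting list of linear segments */
		while (segs) {
			struct sk_buff *next = segs->next;

			segs->next = NULL;
			foo_xmit_one(segs, dev);
			segs = next;
		}
		kfree_skb(skb);			/* original GSO skb no longer needed */
		return 0;
	}
	return foo_xmit_one(skb, dev);
}
#endif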
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
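
/*
 * Example (not part of this header, illustrative sketch only): roughly how
 * a receive path can use skb_bond_should_drop() - frames arriving on a
 * bonding slave are either dropped as duplicates or handed to the bond
 * master for delivery.
 */
#if 0
static struct net_device *foo_bond_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		if (skb_bond_should_drop(skb)) {
			kfree_skb(skb);
			return NULL;		/* duplicate: drop it */
		}
		skb->dev = dev->master;		/* deliver as the bond device */
	}
	return skb->dev;
}
#endif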
#endif /* __KERNEL__ */

#endif	/* _LINUX_DEV_H */