/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Interfaces handler.
 *
 * Version:     @(#)dev.h       1.0.10  08/12/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Bjorn Ekwall, <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>
struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;

/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
        ((netdev)->ethtool_ops = (ops))

#define HAVE_ALLOC_NETDEV       /* feature macro: alloc_xxxdev
                                   functions are available. */
#define HAVE_FREE_NETDEV        /* free_netdev() */
#define HAVE_NETDEV_PRIV        /* netdev_priv() */
#define NET_XMIT_SUCCESS        0
#define NET_XMIT_DROP           1       /* skb dropped */
#define NET_XMIT_CN             2       /* congestion notification */
#define NET_XMIT_POLICED        3       /* skb is shot by police */
#define NET_XMIT_BYPASS         4       /* packet does not leave via dequeue;
                                           (TC use only - dev_queue_xmit
                                           returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS          0       /* keep 'em coming, baby */
#define NET_RX_DROP             1       /* packet dropped */
#define NET_RX_CN_LOW           2       /* storm alert, just in case */
#define NET_RX_CN_MOD           3       /* Storm on its way! */
#define NET_RX_CN_HIGH          4       /* The storm is here */
#define NET_RX_BAD              5       /* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
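/*
 * Example (illustrative sketch, not part of this header): mapping the
 * dev_queue_xmit() return code to an errno-style result.  NET_XMIT_CN
 * only warns of congestion, so net_xmit_eval() folds it into success.
 * The function name example_xmit_one is hypothetical.
 */
#if 0
static int example_xmit_one(struct sk_buff *skb)
{
        int rc = dev_queue_xmit(skb);   /* consumes the skb */

        return net_xmit_eval(rc);       /* 0 on success or NET_XMIT_CN */
}
#endif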
#endif

#define MAX_ADDR_LEN    32              /* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK            0       /* driver took care of packet */
#define NETDEV_TX_BUSY          1       /* driver tx path was busy */
#define NETDEV_TX_LOCKED        -1      /* driver tx lock was already taken */
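/*
 * Example (illustrative sketch, not part of this header): the contract a
 * driver's hard_start_xmit follows with these codes.  On NETDEV_TX_OK the
 * driver owns (and later frees) the skb; on NETDEV_TX_BUSY the skb is left
 * untouched and the core will retry it.  struct example_priv and the
 * example_* helpers are hypothetical driver internals.
 */
#if 0
static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *ep = netdev_priv(dev);

        if (example_ring_full(ep))
                return NETDEV_TX_BUSY;  /* do not free skb; core retries */

        example_queue_to_hw(ep, skb);   /* hardware now owns the buffer */
        dev->trans_start = jiffies;
        return NETDEV_TX_OK;
}
#endif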
/*
 * Compute the worst case header length according to the protocols
 * used.
 */
#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER   32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER   96
#else
#define LL_MAX_HEADER   48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
struct net_device_subqueue
{
        /* Give a control state for each queue.  This struct may contain
         * per-queue locks in the future.
         */
        unsigned long state;
};

/*
 * Network device statistics.  Akin to the 2.0 ether stats but
 * with byte counters.
 */
struct net_device_stats
{
        unsigned long rx_packets;       /* total packets received */
        unsigned long tx_packets;       /* total packets transmitted */
        unsigned long rx_bytes;         /* total bytes received */
        unsigned long tx_bytes;         /* total bytes transmitted */
        unsigned long rx_errors;        /* bad packets received */
        unsigned long tx_errors;        /* packet transmit problems */
        unsigned long rx_dropped;       /* no space in linux buffers */
        unsigned long tx_dropped;       /* no space available in linux */
        unsigned long multicast;        /* multicast packets received */
        unsigned long collisions;

        /* detailed rx_errors: */
        unsigned long rx_length_errors;
        unsigned long rx_over_errors;   /* receiver ring buff overflow */
        unsigned long rx_crc_errors;    /* received pkt with crc error */
        unsigned long rx_frame_errors;  /* received frame alignment error */
        unsigned long rx_fifo_errors;   /* receiver fifo overrun */
        unsigned long rx_missed_errors; /* receiver missed packet */

        /* detailed tx_errors */
        unsigned long tx_aborted_errors;
        unsigned long tx_carrier_errors;
        unsigned long tx_fifo_errors;
        unsigned long tx_heartbeat_errors;
        unsigned long tx_window_errors;

        /* for cslip etc */
        unsigned long rx_compressed;
        unsigned long tx_compressed;
};
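/*
 * Example (illustrative sketch, not part of this header): a minimal
 * get_stats hook.  A driver that only updates the counters embedded in
 * struct net_device can simply hand back &dev->stats; the name
 * example_get_stats is hypothetical.
 */
#if 0
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
        return &dev->stats;     /* counters updated on the rx/tx paths */
}
#endif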
/* Media selection options. */
enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};
#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
        unsigned total;
        unsigned dropped;
        unsigned time_squeeze;
        unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
        struct dev_addr_list *next;
        u8 da_addr[MAX_ADDR_LEN];
        u8 da_addrlen;
        u8 da_synced;
        int da_users;
        int da_gusers;
};

/*
 * We tag multicasts with these structures.
 */
#define dev_mc_list     dev_addr_list
#define dmi_addr        da_addr
#define dmi_addrlen     da_addrlen
#define dmi_users       da_users
#define dmi_gusers      da_gusers
struct hh_cache
{
        struct hh_cache *hh_next;       /* Next entry */
        atomic_t hh_refcnt;             /* number of users */
        /*
         * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
         * cache line on SMP.
         * They are mostly read, but hh_refcnt may be changed quite frequently,
         * incurring cache line ping pongs.
         */
        __be16 hh_type ____cacheline_aligned_in_smp;
                                        /* protocol identifier, e.g. ETH_P_IP
                                         * NOTE: For VLANs, this will be the
                                         * encapsulated type. --BLG
                                         */
        u16 hh_len;                     /* length of header */
        int (*hh_output)(struct sk_buff *skb);
        seqlock_t hh_lock;

        /* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
        unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve hard_header_len rounded up to a multiple of HH_DATA_MOD bytes
 * (and at least HH_DATA_MOD).  The alternative would be:
 *      dev->hard_header_len ? (dev->hard_header_len +
 *                              (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
        (((dev)->hard_header_len & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
        ((((dev)->hard_header_len + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
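/*
 * Example (illustrative sketch, not part of this header): reserving
 * link-layer headroom when building an outgoing packet, so that a later
 * dev_hard_header() call has aligned room to prepend the header.  The
 * function name and payload_len parameter are hypothetical.
 */
#if 0
static struct sk_buff *example_build_skb(struct net_device *dev,
                                         unsigned int payload_len)
{
        struct sk_buff *skb;

        skb = alloc_skb(payload_len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, LL_RESERVED_SPACE(dev));       /* headroom */
        return skb;
}
#endif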
struct header_ops {
        int (*create)(struct sk_buff *skb, struct net_device *dev,
                      unsigned short type, const void *daddr,
                      const void *saddr, unsigned len);
        int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
        int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
        void (*cache_update)(struct hh_cache *hh,
                             const struct net_device *dev,
                             const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */
enum netdev_state_t
{
        __LINK_STATE_XOFF = 0,
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_SCHED,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
        __LINK_STATE_QDISC_RUNNING,
};

/*
 * This structure holds the boot-time configured netdevice settings.  They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling, similar to tasklet but with weighting
 */
struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit.  This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head poll_list;

        unsigned long state;
        int weight;
        int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t poll_lock;
        int poll_owner;
        struct net_device *dev;
        struct list_head dev_list;
#endif
};

enum
{
        NAPI_STATE_SCHED,       /* Poll is scheduled */
};
extern void FASTCALL(__napi_schedule(struct napi_struct *n));

/**
 *      napi_schedule_prep - check if napi can be scheduled
 *      @n: napi context
 *
 *      Test if NAPI routine is already running, and if not mark
 *      it as running.  This is used as a condition variable to
 *      ensure that only one NAPI poll instance runs.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
        return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *      napi_schedule - schedule NAPI poll
 *      @n: napi context
 *
 *      Schedule NAPI poll routine to be called if it is not already
 *      running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return 1;
        }
        return 0;
}

static inline void __napi_complete(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        list_del(&n->poll_list);
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *      napi_complete - NAPI processing complete
 *      @n: napi context
 *
 *      Mark NAPI processing as complete.
 */
static inline void napi_complete(struct napi_struct *n)
{
        local_irq_disable();
        __napi_complete(n);
        local_irq_enable();
}
/**
 *      napi_disable - prevent NAPI from scheduling
 *      @n: napi context
 *
 *      Stop NAPI from being scheduled on this context.
 *      Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep_interruptible(1);
}

/**
 *      napi_enable - enable NAPI scheduling
 *      @n: napi context
 *
 *      Resume scheduling of NAPI on this context.
 *      Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *      napi_synchronize - wait until NAPI is not running
 *      @n: napi context
 *
 *      Wait until NAPI is done being scheduled on this context.
 *      Waits till any outstanding processing completes but
 *      does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif
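/*
 * Example (illustrative sketch, not part of this header): the usual shape
 * of a driver's NAPI poll routine.  It processes up to "budget" packets
 * and completes NAPI only when the rx ring is drained, re-enabling the
 * device interrupt afterwards.  struct example_priv and the example_*
 * helpers are hypothetical.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *ep = container_of(napi, struct example_priv, napi);
        int work_done = 0;

        while (work_done < budget && example_rx_pending(ep)) {
                struct sk_buff *skb = example_rx_one(ep);

                netif_receive_skb(skb);
                work_done++;
        }

        if (work_done < budget) {
                napi_complete(napi);            /* ring drained */
                example_enable_rx_irq(ep);      /* interrupts back on */
        }
        return work_done;
}
#endif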
/*
 * The DEVICE structure.
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */
struct net_device
{
        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
         * of the interface.
         */
        char name[IFNAMSIZ];

        /* device name hash chain */
        struct hlist_node name_hlist;

        /*
         * I/O specific fields
         * FIXME: Merge these and struct ifmap into one
         */
        unsigned long mem_end;          /* shared mem end */
        unsigned long mem_start;        /* shared mem start */
        unsigned long base_addr;        /* device I/O address */
        unsigned int irq;               /* device IRQ number */

        /*
         * Some hardware also needs these fields, but they are not
         * part of the usual set specified in Space.c.
         */
        unsigned char if_port;          /* Selectable AUI, TP, .. */
        unsigned char dma;              /* DMA channel */

        unsigned long state;

        struct list_head dev_list;
#ifdef CONFIG_NETPOLL
        struct list_head napi_list;
#endif

        /* The device initialization function. Called only once. */
        int (*init)(struct net_device *dev);

        /* ------- Fields preinitialized in Space.c finish here ------- */

        /* Net device features */
        unsigned long features;
#define NETIF_F_SG              1       /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM         2       /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM         4       /* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM         8       /* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM       16      /* Can checksum TCP/UDP over IPv6 */
#define NETIF_F_HIGHDMA         32      /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST        64      /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX      128     /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX      256     /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
#define NETIF_F_GSO             2048    /* Enable software GSO. */
#define NETIF_F_LLTX            4096    /* LockLess TX - deprecated. Please */
                                        /* do not use LLTX in new drivers. */
#define NETIF_F_NETNS_LOCAL     8192    /* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE     16384   /* Has multiple TX/RX queues */
#define NETIF_F_LRO             32768   /* large receive offload */

        /* Segmentation offload features */
#define NETIF_F_GSO_SHIFT       16
#define NETIF_F_GSO_MASK        0xffff0000
#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

        /* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM         (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM        (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

        struct net_device *next_sched;

        /* Interface index.  Unique device identifier. */
        int ifindex;
        int iflink;

        struct net_device_stats *(*get_stats)(struct net_device *dev);
        struct net_device_stats stats;

#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data *wireless_data;
#endif
        const struct ethtool_ops *ethtool_ops;

        /* Hardware header description */
        const struct header_ops *header_ops;

        /*
         * This marks the end of the "visible" part of the structure.  All
         * fields hereafter are internal to the system, and may change at
         * will (read: may be cleaned up at will).
         */

        unsigned int flags;             /* interface flags (a la BSD) */
        unsigned short gflags;
        unsigned short priv_flags;      /* Like 'flags' but invisible to userspace. */
        unsigned short padded;          /* How much padding added by alloc_netdev() */

        unsigned char operstate;        /* RFC2863 operstate */
        unsigned char link_mode;        /* mapping policy to operstate */

        unsigned mtu;                   /* interface MTU value */
        unsigned short type;            /* interface hardware type */
        unsigned short hard_header_len; /* hardware hdr length */

        struct net_device *master;      /* Pointer to master device of a group,
                                         * which this device is member of.
                                         */

        /* Interface address info. */
        unsigned char perm_addr[MAX_ADDR_LEN];  /* permanent hw address */
        unsigned char addr_len;         /* hardware address length */
        unsigned short dev_id;          /* for shared network cards */

        struct dev_addr_list *uc_list;  /* Secondary unicast mac addresses */
        int uc_count;                   /* Number of installed ucasts */
        int uc_promisc;
        struct dev_addr_list *mc_list;  /* Multicast mac addresses */
        int mc_count;                   /* Number of installed mcasts */
        int promiscuity;
        int allmulti;

        /* Protocol specific pointers */

        void *atalk_ptr;                /* AppleTalk link */
        void *ip_ptr;                   /* IPv4 specific data */
        void *dn_ptr;                   /* DECnet specific data */
        void *ip6_ptr;                  /* IPv6 specific data */
        void *ec_ptr;                   /* Econet specific data */
        void *ax25_ptr;                 /* AX.25 specific data */
        struct wireless_dev *ieee80211_ptr;     /* IEEE 802.11 specific data,
                                                   assign before registering */

        /*
         * Cache line mostly used on receive path (including eth_type_trans())
         */
        unsigned long last_rx;          /* Time of last Rx */

        /* Interface address info used in eth_type_trans() */
        unsigned char dev_addr[MAX_ADDR_LEN];   /* hw address (before bcast
                                                   because most packets are
                                                   unicast) */
        unsigned char broadcast[MAX_ADDR_LEN];  /* hw bcast address */

        /*
         * Cache line mostly used on queue transmit path (qdisc)
         */
        /* device queue lock */
        spinlock_t queue_lock ____cacheline_aligned_in_smp;
        struct Qdisc *qdisc;
        struct Qdisc *qdisc_sleeping;
        struct list_head qdisc_list;
        unsigned long tx_queue_len;     /* Max frames per queue allowed */

        /* Partially transmitted GSO packet. */
        struct sk_buff *gso_skb;

        /* ingress path synchronizer */
        spinlock_t ingress_lock;
        struct Qdisc *qdisc_ingress;

        /*
         * One part is mostly used on xmit path (device)
         */
        /* hard_start_xmit synchronizer */
        spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
        /* cpu id of processor entered into hard_start_xmit, or -1
         * if nobody entered there.
         */
        int xmit_lock_owner;
        void *priv;                     /* pointer to private data */
        int (*hard_start_xmit)(struct sk_buff *skb,
                               struct net_device *dev);
        /* These may be needed for future network-power-down code. */
        unsigned long trans_start;      /* Time (in jiffies) of last Tx */

        int watchdog_timeo;             /* used by dev_watchdog() */
        struct timer_list watchdog_timer;

        /*
         * refcnt is a very hot point, so align it on SMP
         */
        /* Number of references to this device */
        atomic_t refcnt ____cacheline_aligned_in_smp;

        /* delayed register/unregister */
        struct list_head todo_list;
        /* device index hash chain */
        struct hlist_node index_hlist;

        struct net_device *link_watch_next;

        /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED = 0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
               NETREG_UNREGISTERED,     /* completed unregister todo */
               NETREG_RELEASED,         /* called free_netdev */
        } reg_state;

        /* Called after device is detached from network. */
        void (*uninit)(struct net_device *dev);
        /* Called after last user reference disappears. */
        void (*destructor)(struct net_device *dev);

        /* Pointers to interface service routines. */
        int (*open)(struct net_device *dev);
        int (*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
        void (*change_rx_flags)(struct net_device *dev,
                                int flags);
#define HAVE_SET_RX_MODE
        void (*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
        void (*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
        int (*set_mac_address)(struct net_device *dev,
                               void *addr);
#define HAVE_VALIDATE_ADDR
        int (*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
        int (*do_ioctl)(struct net_device *dev,
                        struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
        int (*set_config)(struct net_device *dev,
                          struct ifmap *map);
#define HAVE_CHANGE_MTU
        int (*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
        void (*tx_timeout)(struct net_device *dev);

        void (*vlan_rx_register)(struct net_device *dev,
                                 struct vlan_group *grp);
        void (*vlan_rx_add_vid)(struct net_device *dev,
                                unsigned short vid);
        void (*vlan_rx_kill_vid)(struct net_device *dev,
                                 unsigned short vid);

        int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
        struct netpoll_info *npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        void (*poll_controller)(struct net_device *dev);
#endif

        /* Network namespace this network device is inside */
        struct net *nd_net;

        /* bridge stuff */
        struct net_bridge_port *br_port;
        /* macvlan */
        struct macvlan_port *macvlan_port;

        /* class/net/name entry */
        struct device dev;
        /* space for optional statistics and wireless sysfs groups */
        struct attribute_group *sysfs_groups[3];

        /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;

        /* The TX queue control structures */
        unsigned int egress_subqueue_count;
        struct net_device_subqueue egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN            32
#define NETDEV_ALIGN_CONST      (NETDEV_ALIGN - 1)

/**
 *      netdev_priv - access network device private data
 *      @dev: network device
 *
 *      Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
        return dev->priv;
}
/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, a symlink to the physical device is
 * created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))
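/*
 * Example (illustrative sketch, not part of this header): pairing
 * alloc_netdev() with netdev_priv() and SET_NETDEV_DEV().  alloc_netdev()
 * sizes the private area for us; struct example_priv, the "ex%d" name
 * template, and the parent argument are hypothetical.
 */
#if 0
struct example_priv {
        spinlock_t lock;
        u32 msg_enable;
        /* ... further driver state ... */
};

static struct net_device *example_create_netdev(struct device *parent)
{
        struct net_device *dev;
        struct example_priv *ep;

        dev = alloc_netdev(sizeof(struct example_priv), "ex%d", ether_setup);
        if (!dev)
                return NULL;

        ep = netdev_priv(dev);          /* points into dev's private area */
        spin_lock_init(&ep->lock);

        SET_NETDEV_DEV(dev, parent);    /* sysfs "device" symlink */
        return dev;
}
#endif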
static inline void netif_napi_add(struct net_device *dev,
                                  struct napi_struct *napi,
                                  int (*poll)(struct napi_struct *, int),
                                  int weight)
{
        INIT_LIST_HEAD(&napi->poll_list);
        napi->poll = poll;
        napi->weight = weight;
#ifdef CONFIG_NETPOLL
        napi->dev = dev;
        list_add(&napi->dev_list, &dev->napi_list);
        spin_lock_init(&napi->poll_lock);
        napi->poll_owner = -1;
#endif
        set_bit(NAPI_STATE_SCHED, &napi->state);
}
struct packet_type {
        __be16 type;                    /* This is really htons(ether_type). */
        struct net_device *dev;         /* NULL is wildcarded here */
        int (*func)(struct sk_buff *,
                    struct net_device *,
                    struct packet_type *,
                    struct net_device *);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
                                       int features);
        int (*gso_send_check)(struct sk_buff *skb);
        void *af_packet_priv;
        struct list_head list;
};
#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t dev_base_lock;          /* Device list lock */

#define for_each_netdev(net, d)         \
        list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
        list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
        list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
        struct list_head *lh;
        struct net *net;

        net = dev->nd_net;
        lh = dev->dev_list.next;
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
        return list_empty(&net->dev_base_head) ? NULL :
                net_device_entry(net->dev_base_head.next);
}
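/*
 * Example (illustrative sketch, not part of this header): walking the
 * device list of a namespace under dev_base_lock.  The read lock keeps
 * the list stable during the walk; it does not pin individual devices
 * past the loop (use dev_hold() for that).
 */
#if 0
static void example_dump_devices(struct net *net)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        for_each_netdev(net, dev)
                printk(KERN_DEBUG "%s: ifindex %d\n", dev->name, dev->ifindex);
        read_unlock(&dev_base_lock);
}
#endif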
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);
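/*
 * Example (illustrative sketch, not part of this header): registering a
 * protocol handler with dev_add_pack().  The func hook then runs for
 * every received packet of that ethertype.  ETH_P_EXAMPLE stands in for
 * a real protocol number and example_rcv() for a real handler.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* ... inspect skb ... */
        kfree_skb(skb);         /* the handler consumes the skb */
        return 0;
}

static struct packet_type example_packet_type = {
        .type = __constant_htons(ETH_P_EXAMPLE),
        .func = example_rcv,
};

/* Call dev_add_pack(&example_packet_type) at init time and
 * dev_remove_pack(&example_packet_type) on teardown.
 */
#endif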
extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
                                           unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice(struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
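/*
 * Example (illustrative sketch, not part of this header): watching for
 * device state changes with register_netdevice_notifier().  The NETDEV_*
 * event codes come from <linux/notifier.h>; example_netdev_event is a
 * hypothetical callback.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                printk(KERN_INFO "%s is up\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
        .notifier_call = example_netdev_event,
};

/* Call register_netdevice_notifier(&example_notifier) at init time;
 * already-registered devices are replayed to the new notifier.
 */
#endif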
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned len)
{
        if (!dev->header_ops || !dev->header_ops->create)
                return 0;

        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
                                   unsigned char *haddr)
{
        const struct net_device *dev = skb->dev;

        if (!dev->header_ops || !dev->header_ops->parse)
                return 0;
        return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
        return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
        struct net_device *output_queue;
        struct sk_buff_head input_pkt_queue;
        struct list_head poll_list;
        struct sk_buff *completion_queue;

        struct napi_struct backlog;
#ifdef CONFIG_NET_DMA
        struct dma_chan *net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
        if (!test_bit(__LINK_STATE_XOFF, &dev->state))
                __netif_schedule(dev);
}
/**
 *      netif_start_queue - allow transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
        clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *      netif_wake_queue - restart transmit
 *      @dev: network device
 *
 *      Allow upper layers to call the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap()) {
                clear_bit(__LINK_STATE_XOFF, &dev->state);
                return;
        }
#endif
        if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
                __netif_schedule(dev);
}

/**
 *      netif_stop_queue - stop transmit queue
 *      @dev: network device
 *
 *      Stop upper layers calling the device hard_start_xmit routine.
 *      Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
        set_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *      netif_queue_stopped - test if transmit queue is flow-blocked
 *      @dev: network device
 *
 *      Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *      netif_running - test if up
 *      @dev: network device
 *
 *      Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_START, &dev->state);
}
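/*
 * Example (illustrative sketch, not part of this header): typical driver
 * flow control.  The xmit path stops the queue when the hardware ring
 * fills; the tx-completion interrupt wakes it once descriptors are
 * reclaimed.  struct example_priv and its ring helpers are hypothetical.
 */
#if 0
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *ep = netdev_priv(dev);

        example_post_to_ring(ep, skb);
        if (example_ring_space(ep) == 0)
                netif_stop_queue(dev);  /* core stops feeding us */
        return NETDEV_TX_OK;
}

static void example_tx_complete_irq(struct net_device *dev)
{
        struct example_priv *ep = netdev_priv(dev);

        example_reclaim_descriptors(ep);
        if (netif_queue_stopped(dev) && example_ring_space(ep) > 0)
                netif_wake_queue(dev);  /* reschedules the qdisc */
}
#endif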
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *      netif_start_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *      netif_stop_subqueue - stop sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *      __netif_subqueue_stopped - test status of subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                           u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        return test_bit(__LINK_STATE_XOFF,
                        &dev->egress_subqueue[queue_index].state);
#else
        return 0;
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
                                         struct sk_buff *skb)
{
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *      netif_wake_subqueue - allow sending packets on subqueue
 *      @dev: network device
 *      @queue_index: sub queue index
 *
 *      Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
        if (netpoll_trap())
                return;
#endif
        if (test_and_clear_bit(__LINK_STATE_XOFF,
                               &dev->egress_subqueue[queue_index].state))
                __netif_schedule(dev);
#endif
}

/**
 *      netif_is_multiqueue - test if device has multiple transmit queues
 *      @dev: network device
 *
 *      Check if device has multiple transmit queues.  Always returns false
 *      if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        return !!(dev->features & NETIF_F_MULTI_QUEUE);
#else
        return 0;
#endif
}
/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int dev_change_flags(struct net_device *, unsigned);
extern int dev_change_name(struct net_device *, char *);
extern int dev_change_net_namespace(struct net_device *,
                                    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
                               struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
                               struct net_device *dev);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 *      dev_put - release reference to device
 *      @dev: network device
 *
 *      Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
        atomic_dec(&dev->refcnt);
}

/**
 *      dev_hold - get reference to device
 *      @dev: network device
 *
 *      Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
        atomic_inc(&dev->refcnt);
}
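/*
 * Example (illustrative sketch, not part of this header): reference
 * discipline with the lookup helpers.  dev_get_by_name() returns the
 * device with its refcnt already elevated, so every successful lookup
 * must be balanced by dev_put().  example_poke_device is hypothetical.
 */
#if 0
static int example_poke_device(struct net *net, const char *name)
{
        struct net_device *dev = dev_get_by_name(net, name);

        if (!dev)
                return -ENODEV;

        printk(KERN_DEBUG "%s: mtu %u\n", dev->name, dev->mtu);

        dev_put(dev);           /* balance dev_get_by_name() */
        return 0;
}
#endif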
/* Carrier loss detection, dial on demand.  The functions netif_carrier_on
 * and _off may be called from IRQ context, but the caller is responsible
 * for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */
extern void linkwatch_fire_event(struct net_device *dev);

/**
 *      netif_carrier_ok - test if carrier present
 *      @dev: network device
 *
 *      Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
/**
 *      netif_dormant_on - mark device as dormant.
 *      @dev: network device
 *
 *      Mark device as dormant (as per RFC2863).
 *
 *      The dormant state indicates that the relevant interface is not
 *      actually in a condition to pass packets (i.e., it is not 'up') but is
 *      in a "pending" state, waiting for some external event.  For "on-
 *      demand" interfaces, this new state identifies the situation where the
 *      interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 *      netif_dormant_off - set device as not dormant.
 *      @dev: network device
 *
 *      Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
                linkwatch_fire_event(dev);
}

/**
 *      netif_dormant - test if device is dormant
 *      @dev: network device
 *
 *      Check if the device is in the RFC2863 dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
        return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *      netif_oper_up - test if device is operational
 *      @dev: network device
 *
 *      Check if the device's RFC2863 operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
        return (dev->operstate == IF_OPER_UP ||
                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
/**
 *      netif_device_present - is device available or removed
 *      @dev: network device
 *
 *      Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
        NETIF_MSG_DRV           = 0x0001,
        NETIF_MSG_PROBE         = 0x0002,
        NETIF_MSG_LINK          = 0x0004,
        NETIF_MSG_TIMER         = 0x0008,
        NETIF_MSG_IFDOWN        = 0x0010,
        NETIF_MSG_IFUP          = 0x0020,
        NETIF_MSG_RX_ERR        = 0x0040,
        NETIF_MSG_TX_ERR        = 0x0080,
        NETIF_MSG_TX_QUEUED     = 0x0100,
        NETIF_MSG_INTR          = 0x0200,
        NETIF_MSG_TX_DONE       = 0x0400,
        NETIF_MSG_RX_STATUS     = 0x0800,
        NETIF_MSG_PKTDATA       = 0x1000,
        NETIF_MSG_HW            = 0x2000,
        NETIF_MSG_WOL           = 0x4000,
};

#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
        /* use default */
        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
                return default_msg_enable_bits;
        if (debug_value == 0)   /* no output */
                return 0;
        /* set low N bits */
        return (1 << debug_value) - 1;
}
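/*
 * Example (illustrative sketch, not part of this header): wiring the
 * message-level machinery into a driver.  "debug" would typically be a
 * module parameter; struct example_priv must carry a msg_enable field
 * for the netif_msg_*() test macros to work.
 */
#if 0
static int debug = -1;          /* -1 == use driver defaults */

static void example_init_msg(struct net_device *dev)
{
        struct example_priv *ep = netdev_priv(dev);

        ep->msg_enable = netif_msg_init(debug,
                                        NETIF_MSG_DRV | NETIF_MSG_LINK);

        if (netif_msg_link(ep))
                printk(KERN_INFO "%s: link messages enabled\n", dev->name);
}
#endif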
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
                                         struct napi_struct *napi)
{
        return netif_running(dev) && napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
                                       struct napi_struct *napi)
{
        dev_hold(dev);
        __napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
                                     struct napi_struct *napi)
{
        if (netif_rx_schedule_prep(dev, napi))
                __netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
                                      struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __netif_rx_schedule(dev, napi);
                return 1;
        }
        return 0;
}

/* Same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued.
 */
static inline void __netif_rx_complete(struct net_device *dev,
                                       struct napi_struct *napi)
{
        __napi_complete(napi);
        dev_put(dev);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu.  This primitive is called by dev->poll(), when
 * it completes the work.  The device cannot be out of the poll list
 * at this moment; if it is, that is a BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
                                     struct napi_struct *napi)
{
        unsigned long flags;

        local_irq_save(flags);
        __netif_rx_complete(dev, napi);
        local_irq_restore(flags);
}
/**
 *      __netif_tx_lock - grab network device transmit lock
 *      @dev: network device
 *      @cpu: cpu number of lock owner
 *
 *      Get network device transmit lock
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
        spin_lock(&dev->_xmit_lock);
        dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
        __netif_tx_lock(dev, smp_processor_id());
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
        spin_lock_bh(&dev->_xmit_lock);
        dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
        int ok = spin_trylock(&dev->_xmit_lock);
        if (likely(ok))
                dev->xmit_lock_owner = smp_processor_id();
        return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
        dev->xmit_lock_owner = -1;
        spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
        dev->xmit_lock_owner = -1;
        spin_unlock_bh(&dev->_xmit_lock);
}

#define HARD_TX_LOCK(dev, cpu) {                        \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_lock(dev, cpu);              \
        }                                               \
}

#define HARD_TX_UNLOCK(dev) {                           \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                netif_tx_unlock(dev);                   \
        }                                               \
}

static inline void netif_tx_disable(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        netif_stop_queue(dev);
        netif_tx_unlock_bh(dev);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                                          void (*setup)(struct net_device *),
                                          unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
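/*
 * Example (illustrative sketch, not part of this header): the usual
 * lifecycle for a loadable driver.  register_netdev() takes the rtnl
 * lock internally; on any failure after allocation the device must be
 * released with free_netdev().  example_xmit is the hypothetical
 * transmit routine sketched earlier.
 */
#if 0
static struct net_device *example_dev;

static int __init example_init(void)
{
        int err;

        example_dev = alloc_netdev(sizeof(struct example_priv),
                                   "ex%d", ether_setup);
        if (!example_dev)
                return -ENOMEM;

        example_dev->hard_start_xmit = example_xmit;

        err = register_netdev(example_dev);
        if (err) {
                free_netdev(example_dev);
                return err;
        }
        return 0;
}

static void __exit example_exit(void)
{
        unregister_netdev(example_dev);
        free_netdev(example_dev);
}
#endif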
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern void dev_set_promiscuity(struct net_device *dev, int inc);
extern void dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);
static inline int net_gso_ok(int features, int gso_type)
{
        int feature = gso_type << NETIF_F_GSO_SHIFT;
        return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
        return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
        return skb_is_gso(skb) &&
               (!skb_gso_ok(skb, dev->features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct net_device *master = dev->master;

        if (master &&
            (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
                if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
                    skb->protocol == __constant_htons(ETH_P_ARP))
                        return 0;

                if (master->priv_flags & IFF_MASTER_ALB) {
                        if (skb->pkt_type != PACKET_BROADCAST &&
                            skb->pkt_type != PACKET_MULTICAST)
                                return 0;
                }
                if (master->priv_flags & IFF_MASTER_8023AD &&
                    skb->protocol == __constant_htons(ETH_P_SLOW))
                        return 0;

                return 1;
        }
        return 0;
}
#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */