skbuff.h

/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
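/*
 * Editorial note (worked example, not in the original header): with a
 * 64-byte cache line, SKB_TRUESIZE(1500) expands to
 *	1500 + SKB_DATA_ALIGN(sizeof(struct sk_buff))
 *	     + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 * i.e. the payload bytes plus both metadata blocks rounded up to
 * SMP_CACHE_BYTES. This is why an skb's truesize always exceeds its len.
 */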
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *		It is a bad option, but, unfortunately, many vendors do
 *		this. Apparently with the secret goal of selling you a new
 *		device, when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of _all_
 *		the packet as seen by netif_rx in skb->csum.
 *		NOTE: Even if a device supports only some protocols but
 *		is able to produce some skb->csum, it MUST use COMPLETE,
 *		not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *		on a packet received directly from another Linux OS, e.g.,
 *		a virtualised Linux kernel on the same host. The packet can
 *		be treated in the same way as UNNECESSARY, except that on
 *		output (i.e., forwarding) the checksum must be filled in
 *		by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *		hard_start_xmit from skb->csum_start to the end and to
 *		record the checksum at skb->csum_start + skb->csum_offset.
 *
 *	The device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM	- device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	UNNECESSARY: device will do protocol-specific csum. Protocol drivers
 *	that do not want the stack to perform the checksum calculation should
 *	use this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC - this indicates that the device can do FCoE FC CRC
 *			   offload. Correspondingly, the FCoE protocol driver
 *			   stack should use CHECKSUM_UNNECESSARY.
 *
 *	Any questions? No questions, good.		--ANK
 */

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations. The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when this is called.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *);
	void *ctx;
	unsigned long desc;
};
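/*
 * Editorial sketch (not part of the original header): the shape of a
 * zero-copy completion callback. The helper name and the meanings given
 * to ->ctx and ->desc are hypothetical; each driver defines its own.
 */
static inline void example_zerocopy_done(struct ubuf_info *uarg)
{
	/* uarg->ctx would identify the device context, and uarg->desc the
	 * userspace buffer slot that may now be released or recycled. */
}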
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		avail_size;
	};

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
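/*
 * Editorial sketch (assumes the rules in section A above; the helper
 * names are hypothetical): how a receive path might record checksum state.
 */
static inline void example_rx_csum_complete(struct sk_buff *skb, __wsum csum)
{
	/* device checksummed the whole packet as seen by netif_rx */
	skb->csum = csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

static inline void example_rx_csum_none(struct sk_buff *skb)
{
	/* device did not checksum; skb->csum is left undefined */
	skb->ip_summed = CHECKSUM_NONE;
}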
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}
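/*
 * Editorial sketch (hypothetical helper): handing an already-referenced
 * dst to an skb per the skb_dst_set() contract above; skb_dst_drop()
 * (declared in net/dst.h) releases it later.
 */
static inline void example_attach_dst(struct sk_buff *skb,
				      struct dst_entry *dst)
{
	/* the caller's reference on @dst is transferred to @skb */
	skb_dst_set(skb, dst);
}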
extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
			     bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb. It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb. It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer. This is done
 *	by acquiring a payload reference. You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
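/*
 * Editorial sketch (hypothetical helper): a handler that must own the
 * skb it was given, e.g. before editing skb metadata, per the
 * skb_share_check() contract above.
 */
static inline struct sk_buff *example_take_ownership(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* clone failed; the original was freed */
	/* exactly one user now; the data may still be cloned, see skb_cloned() */
	return skb;
}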
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}
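/*
 * Editorial sketch (hypothetical helper): walking a queue with the peek
 * helpers above without removing entries. The caller must hold the queue
 * lock or otherwise own the queue, since peeked buffers stay on the list.
 */
static inline unsigned int example_queued_bytes(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;

	for (skb = skb_peek(list); skb; skb = skb_peek_next(skb, list))
		bytes += skb->len;
	return bytes;
}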
/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object. It allows the list aspects of an
 *	sk_buff_head to be initialized without reinitializing things like
 *	the spinlock. It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
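/*
 * Editorial sketch (hypothetical helper): batching with the splice
 * helpers above - collect buffers on a private list, then move them all
 * onto a shared queue under its lock in O(1).
 */
static inline void example_flush_batch(struct sk_buff_head *batch,
				       struct sk_buff_head *shared)
{
	unsigned long flags;

	spin_lock_irqsave(&shared->lock, flags);
	skb_queue_splice_tail_init(batch, shared);	/* batch is now empty */
	spin_unlock_irqrestore(&shared->lock, flags);
}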
/**
 *	__skb_queue_after - queue a buffer after the given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
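/*
 * Editorial sketch (hypothetical helper): draining a queue with the
 * lockless dequeue above - only safe when no other context can touch
 * @list (this mirrors the bookkeeping skb_queue_purge() does with locking).
 */
static inline void example_drain_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}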
static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition it updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);
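/*
 * Editorial sketch (hypothetical helper): attaching one page of payload
 * as fragment 0 with skb_fill_page_desc(), then fixing up the length
 * accounting by hand - the same bookkeeping that skb_add_rx_frag()
 * above bundles for you.
 */
static inline void example_attach_page(struct sk_buff *skb,
				       struct page *page, unsigned int len)
{
	/* caller must already hold a reference on @page */
	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len      += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;
}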
#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
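/*
 * Editorial sketch (hypothetical helper): the canonical pskb_may_pull()
 * pattern - guarantee @hlen linear bytes before dereferencing a header
 * through skb->data.
 */
static inline void *example_pull_header(struct sk_buff *skb, unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;	/* packet too short, or reallocation failed */
	return skb->data;	/* first @hlen bytes are now linear */
}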
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
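/*
 * Editorial sketch (hypothetical helper; the 16-byte headroom is an
 * arbitrary illustration value): the usual allocate/reserve/put dance.
 * skb_reserve() must run while the buffer is still empty.
 */
static inline struct sk_buff *example_new_packet(unsigned int payload)
{
	struct sk_buff *skb = alloc_skb(payload + 16, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_reserve(skb, 16);	/* headroom for later skb_push() */
	skb_put(skb, payload);	/* payload area for the caller to fill */
	return skb;
}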
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies: it can be small if the
 * hardware handles it, or large if we have to take an exception and fix
 * it up in software.
 *
 * Since an Ethernet header is 14 bytes, network drivers often end up
 * with the IP header at an unaligned offset. The IP header can be
 * aligned by shifting the start of the packet by 2 bytes. Drivers
 * should do this with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
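
/*
 * Editor's illustrative sketch (not part of the original header): the
 * receive-path pattern described in the comment above. The wrapper name
 * is hypothetical; the point is that the reserve happens once, on a
 * freshly allocated, still-empty buffer.
 */
static inline struct sk_buff *example_align_rx(struct sk_buff *skb)
{
	/* shift data/tail by two bytes so the IP header that follows a
	 * 14-byte Ethernet header lands on a 4-byte boundary */
	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}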

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte-aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned, so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

extern void *netdev_alloc_frag(unsigned int fragsz);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length,
					  gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							   unsigned int length,
							   gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							 unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
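
/*
 * Editor's illustrative sketch (not part of the original header): a
 * driver rx refill using the helper above instead of open-coding the
 * NET_IP_ALIGN reserve. The 1536-byte buffer size is hypothetical.
 */
static inline struct sk_buff *example_rx_refill(struct net_device *dev)
{
	/* headroom for NET_IP_ALIGN is added and reserved internally */
	return netdev_alloc_skb_ip_align(dev, 1536);
}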

/**
 *	skb_frag_page - retrieve the page referred to by a paged fragment
 *	@frag: the paged fragment
 *
 *	Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 *	__skb_frag_ref - take an additional reference on a paged fragment.
 *	@frag: the paged fragment
 *
 *	Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 *	skb_frag_ref - take an additional reference on a paged fragment of an skb.
 *	@skb: the buffer
 *	@f: the fragment offset.
 *
 *	Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 *	__skb_frag_unref - release a reference on a paged fragment.
 *	@frag: the paged fragment
 *
 *	Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 *	skb_frag_unref - release a reference on a paged fragment of an skb.
 *	@skb: the buffer
 *	@f: the fragment offset
 *
 *	Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 *	skb_frag_address - gets the address of the data contained in a paged fragment
 *	@frag: the paged fragment buffer
 *
 *	Returns the address of the data within @frag. The page must already
 *	be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 *	skb_frag_address_safe - gets the address of the data contained in a paged fragment
 *	@frag: the paged fragment buffer
 *
 *	Returns the address of the data within @frag. Checks that the page
 *	is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 *	__skb_frag_set_page - sets the page contained in a paged fragment
 *	@frag: the paged fragment
 *	@page: the page to set
 *
 *	Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 *	skb_frag_set_page - sets the page contained in a paged fragment of an skb
 *	@skb: the buffer
 *	@f: the fragment offset
 *	@page: the page to set
 *
 *	Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 *	skb_frag_dma_map - maps a paged fragment via the DMA API
 *	@dev: the device to map the fragment to
 *	@frag: the paged fragment to map
 *	@offset: the offset within the fragment (starting at the
 *	         fragment's own offset)
 *	@size: the number of bytes to map
 *	@dir: the direction of the mapping (%PCI_DMA_*)
 *
 *	Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
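
/*
 * Editor's illustrative sketch (not part of the original header):
 * mapping every paged fragment of an skb for transmit. Descriptor
 * writing and unmap-on-error handling are elided; only calls defined
 * in this header and the generic DMA API are used.
 */
static inline int example_map_frags(struct device *dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		/* ... hand (addr, skb_frag_size(frag)) to the hardware ... */
	}
	return 0;
}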

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, the data is reallocated. If reallocation fails, an
 *	error is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area from skb->head to
 *	skb->tail and at least @headroom bytes of space at the head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned. It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
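
/*
 * Editor's illustrative sketch (not part of the original header):
 * guaranteeing private, sufficient headroom with skb_cow_head() before
 * pushing an encapsulation header. The 4-byte tag is hypothetical.
 */
static inline int example_push_tag(struct sk_buff *skb, u32 tag)
{
	int err = skb_cow_head(skb, sizeof(tag));

	if (err)
		return err;	/* skb is unchanged on failure */

	/* the headroom is now writable and large enough */
	memcpy(skb_push(skb, sizeof(tag)), &tag, sizeof(tag));
	return 0;
}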

/**
 *	skb_padto - pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
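
/*
 * Editor's illustrative sketch (not part of the original header):
 * padding runt frames to the 60-byte Ethernet minimum (ETH_ZLEN) on
 * the transmit path, as many MAC drivers do.
 */
static inline int example_tx_pad(struct sk_buff *skb)
{
	/* note: on failure skb_padto() has already freed the skb, so
	 * the caller must not touch it again */
	return skb_padto(skb, 60);
}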

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
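
/*
 * Editor's illustrative sketch (not part of the original header):
 * flattening a paged skb before handing it to code that indexes
 * skb->data directly.
 */
static inline int example_flatten(struct sk_buff *skb)
{
	int err = skb_linearize(skb);

	if (err)
		return err;	/* -ENOMEM; skb left as it was */

	/* skb_headlen(skb) == skb->len now holds */
	return 0;
}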

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
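
/*
 * Editor's illustrative sketch (not part of the original header):
 * stripping a hypothetical 4-byte outer header on receive while
 * keeping a CHECKSUM_COMPLETE value consistent.
 */
static inline void example_strip_outer(struct sk_buff *skb)
{
	const void *start = skb->data;

	__skb_pull(skb, 4);
	skb_postpull_rcsum(skb, start, 4);
	/* the skb_pull_rcsum() declared above performs both steps in one call */
}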

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb)					\
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb)				\
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
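
/*
 * Editor's illustrative sketch (not part of the original header):
 * the _safe walker is required whenever the loop body unlinks the
 * current element. The mark-based predicate is hypothetical, and the
 * caller is assumed to hold the queue lock (the __ helpers don't).
 */
static inline void example_drop_marked(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(list, skb, tmp) {
		if (skb->mark) {
			__skb_unlink(skb, list);
			kfree_skb(skb);
		}
	}
}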

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);
extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
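
/*
 * Editor's illustrative sketch (not part of the original header):
 * reading a fixed-size field at an arbitrary offset whether or not it
 * sits in the linear area. The on-stack buffer is only written to when
 * the bytes have to be copied out of fragments.
 */
static inline int example_peek_bytes(const struct sk_buff *skb, int offset,
				     unsigned char *out)
{
	unsigned char buf[8];
	const unsigned char *p = skb_header_pointer(skb, offset,
						    sizeof(buf), buf);

	if (!p)
		return -EINVAL;	/* packet shorter than offset + 8 */

	memcpy(out, p, sizeof(buf));
	return 0;
}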

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and
 *	stores it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non-NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
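
/*
 * Editor's illustrative sketch (not part of the original header):
 * where the hook sits at the end of a driver transmit routine, per the
 * comment above. Descriptor setup is elided.
 */
static inline void example_xmit_tail(struct sk_buff *skb)
{
	/* last point at which the frame can still be cloned for
	 * timestamping: call immediately before the hardware hand-off */
	skb_tx_timestamp(skb);

	/* ... write the descriptor and ring the doorbell ... */
}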

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum. The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP. It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets. In that case the function should return zero if the
 *	checksum is correct. In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
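
/*
 * Editor's illustrative sketch (not part of the original header): a
 * typical receive-side use of skb_checksum_complete(). skb->csum is
 * assumed to already hold the pseudo-header sum where the protocol
 * requires one.
 */
static inline int example_verify_csum(struct sk_buff *skb)
{
	if (skb_checksum_complete(skb))
		return -EINVAL;	/* bad checksum: caller typically drops */

	return 0;
}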

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif

#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */

static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: this doesn't drop the conntrack and bridge references that dst
 * may already hold; nf_copy() below does that before copying. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one. Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_offset(skb) < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */