/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * See comment in sk_buff definition, just before the 'tail' member
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
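
/*
 * Example (illustrative sketch, not part of the original file): most
 * callers go through the alloc_skb() wrapper, reserve headroom for
 * lower-layer headers and then skb_put() their payload. The sizes and
 * names (payload, payload_len) below are assumptions for the example.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(payload_len + 16, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, 16);			// headroom for headers
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	...
 *	kfree_skb(skb);				// drop our reference
 */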

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
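
/*
 * Example (illustrative sketch): a driver RX path typically uses the
 * netdev_alloc_skb() wrapper, which calls this with %GFP_ATOMIC. The
 * rx_len value below is an assumption for illustration.
 *
 *	skb = netdev_alloc_skb(dev, rx_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);		// align the IP header
 *	skb_put(skb, rx_len);			// room for the received frame
 */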

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	skb_release_data(skb);
	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif

	kfree_skbmem(skb);
}

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
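
/*
 * Example (illustrative sketch): kfree_skb() only frees the buffer once
 * the last reference is gone, so a second consumer takes its own
 * reference with skb_get() and drops it independently.
 *
 *	struct sk_buff *extra = skb_get(skb);	// users is now 2
 *	kfree_skb(skb);				// drops to 1, no free
 *	kfree_skb(extra);			// drops to 0, buffer freed
 */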

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->dst = dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum_start = old->csum_start;
	new->csum_offset = old->csum_offset;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	skb_copy_secmark(new, old);
}

static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->cloned = 1;
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->nohdr = 0;
	n->destructor = NULL;
#ifdef CONFIG_NET_CLS_ACT
	/* FIXME What is this and why don't we do it in copy_skb_header? */
	n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	C(iif);
#endif
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_data(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL, otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
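
/*
 * Example (illustrative sketch): cloning is the cheap way to hand the
 * same payload to two consumers; only the sk_buff metadata is
 * duplicated, the data area is shared and must not be written through
 * either copy without making a private copy first.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *	if (!clone)
 *		return -ENOMEM;
 *	netif_rx(clone);		// one copy up the stack
 *	kfree_skb(skb);			// drop the original reference
 */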

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}

/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of an &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
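
/*
 * Example (illustrative sketch): when only protocol headers need to be
 * rewritten, pskb_copy() avoids linearizing and duplicating the payload
 * pages that skb_copy() would copy.
 *
 *	// full private copy: header and all data are writable
 *	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *	// header-only copy: frags stay shared, header is writable
 *	struct sk_buff *hdr = pskb_copy(skb, GFP_ATOMIC);
 */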

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of @skb. The &sk_buff itself is not changed and MUST have
 *	a reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed; in the latter case the &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	skb->mac_header	      += off;
	skb->csum_start       += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
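
/*
 * Example (illustrative sketch): a tunnel or encapsulation path that
 * must prepend a header uses skb_realloc_headroom() when the existing
 * headroom is too small. hdr_len is an assumption for the example.
 *
 *	if (skb_headroom(skb) < hdr_len) {
 *		struct sk_buff *nskb = skb_realloc_headroom(skb, hdr_len);
 *		if (!nskb)
 *			return -ENOMEM;
 *		kfree_skb(skb);
 *		skb = nskb;
 *	}
 *	skb_push(skb, hdr_len);		// now guaranteed to fit
 */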

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	n->mac_header += off;
#endif

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
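
/*
 * Example (illustrative sketch): an Ethernet driver padding short
 * frames to the minimum frame size before handing them to hardware,
 * via the skb_padto() wrapper around this function. Note that the skb
 * is freed on failure, so the caller must not touch it afterwards.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// skb was already freed
 */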

/* Trims skb to length len. It can change skb pointers.
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
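
/*
 * Example (illustrative sketch): callers normally go through pskb_trim()
 * (or pskb_trim_rcsum()), which only drops into ___pskb_trim() when the
 * skb is non-linear. Here a receive path trims trailing padding; the
 * real_len value is an assumption for the example.
 *
 *	if (pskb_trim(skb, real_len))
 *		goto drop;		// allocation failure
 */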

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff:
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at the tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough room at
	 * the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
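
/*
 * Example (illustrative sketch): protocol code rarely calls this
 * directly; it uses pskb_may_pull() to make sure the first bytes it
 * wants to dereference are linear, which falls back to
 * __pskb_pull_tail() when needed.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 *		goto drop;		// malformed or short packet
 *	uh = udp_hdr(skb);		// header is now safely linear
 */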

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
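
/*
 * Example (illustrative sketch): skb_copy_bits() is the safe way to
 * read bytes that may live in frags rather than the linear area; a
 * negative offset reaches back into the headroom, as skb_copy() above
 * relies on. The th_offset value is an assumption for the example.
 *
 *	struct tcphdr th;		// stack copy of the header
 *
 *	if (skb_copy_bits(skb, th_offset, &th, sizeof(th)) < 0)
 *		goto drop;		// offset beyond skb->len
 */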

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}
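
/*
 * Example (illustrative sketch): computing a checksum over everything
 * from the transport header onward; the caller would then fold this
 * into a pseudo-header sum.
 *
 *	__wsum csum = skb_checksum(skb, skb_transport_offset(skb),
 *				   skb->len - skb_transport_offset(skb), 0);
 */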

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
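
/*
 * Example (illustrative sketch): a classic producer/consumer pair on a
 * driver-private queue; the list lock makes the two sides safe against
 * each other without extra locking.
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);		// producer (e.g. IRQ path)
 *	...
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		netif_rx(skb);			// consumer
 */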

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately, which is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 *	skb_split - Split fragmented skb to two parts at length len.
 *	@skb: the buffer to split
 *	@skb1: the buffer to receive the second part
 *	@len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
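
/*
 * Example (illustrative sketch): TCP uses skb_split() when it must
 * transmit only the first part of a queued segment; afterwards @skb
 * holds the first mss bytes and @skb1 the remainder. Both buffers and
 * the mss value are supplied by the caller here.
 *
 *	skb_split(skb, skb1, mss);	// skb1 preallocated by the caller
 */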
  1405. /**
  1406. * skb_prepare_seq_read - Prepare a sequential read of skb data
  1407. * @skb: the buffer to read
  1408. * @from: lower offset of data to be read
  1409. * @to: upper offset of data to be read
  1410. * @st: state variable
  1411. *
  1412. * Initializes the specified state variable. Must be called before
  1413. * invoking skb_seq_read() for the first time.
  1414. */
  1415. void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
  1416. unsigned int to, struct skb_seq_state *st)
  1417. {
  1418. st->lower_offset = from;
  1419. st->upper_offset = to;
  1420. st->root_skb = st->cur_skb = skb;
  1421. st->frag_idx = st->stepped_offset = 0;
  1422. st->frag_data = NULL;
  1423. }

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary;
 *       this limitation is the cost of zerocopy sequential
 *       reads of potentially non-linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment; state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb);

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + abs_offset;
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->root_skb == st->cur_skb &&
		   skb_shinfo(st->root_skb)->frag_list) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		goto next_skb;
	}

	return 0;
}

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the read is abandoned before skb_seq_read()
 * has returned 0, so that any mapped fragment is released.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
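
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * walk the first @to bytes of @skb with the zerocopy sequential
 * reader. example_seq_walk() is hypothetical; the loop shape is the
 * intended calling convention for the three functions above.
 */
static void example_seq_walk(struct sk_buff *skb, unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* @data points at @len contiguous bytes */
		consumed += len;
	}
	/*
	 * skb_seq_read() returned 0 here, so the state is already
	 * clean; call skb_abort_seq_read(&st) instead when leaving
	 * the loop early.
	 */
}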

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
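
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * search a packet for the string "HTTP" via the textsearch
 * infrastructure. example_find_http() is hypothetical; error
 * handling is abbreviated.
 */
static unsigned int example_find_http(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_ATOMIC,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* offset of first match, or UINT_MAX */
}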

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
				     frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
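
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * push @length iovec bytes into @skb purely as page fragments, reusing
 * the stock ip_generic_getfrag() copy helper from the IPv4 output path
 * (declared in include/net/ip.h). example_append_iov() itself is
 * hypothetical.
 */
static int example_append_iov(struct sock *sk, struct sk_buff *skb,
			      struct msghdr *msg, int length)
{
	/* ip_generic_getfrag() interprets @from as the iovec array */
	return skb_append_datato_frags(sk, skb, ip_generic_getfrag,
				       msg->msg_iov, length);
}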

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
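
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * strip a hypothetical 4-byte encapsulation header on receive while
 * keeping a CHECKSUM_COMPLETE value consistent, much as the VLAN rx
 * path pulls its tag. example_decap() is hypothetical.
 */
static int example_decap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;	/* header is not even present */

	skb_pull_rcsum(skb, 4);	/* adjusts skb->csum while pulling */
	return 0;
}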

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int k;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
		if (unlikely(!nskb))
			goto err;

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		nskb->dev = skb->dev;
		skb_copy_queue_mapping(nskb, skb);
		nskb->priority = skb->priority;
		nskb->protocol = skb->protocol;
		nskb->dst = dst_clone(skb->dst);
		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		nskb->pkt_type = skb->pkt_type;
		nskb->mac_len = skb->mac_len;

		skb_reserve(nskb, headroom);
		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
					  doffset);
		if (!sg) {
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;
		k = 0;

		nskb->ip_summed = CHECKSUM_PARTIAL;
		nskb->csum = skb->csum;
		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len) {
			BUG_ON(i >= nfrags);

			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			k++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				break;
			}

			frag++;
		}

		skb_shinfo(nskb)->nr_frags = k;
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
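
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * software-segment a GSO skb and walk the resulting list, in the
 * style of the dev_gso_segment() path. example_xmit_one() is a
 * hypothetical per-segment transmit hook.
 */
static int example_gso_xmit(struct sk_buff *skb, int features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		/* example_xmit_one(nskb); */
	}
	kfree_skb(skb);	/* data now lives in the segments */
	return 0;
}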

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset + offset - start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg + elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
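
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * map a linear-plus-frags skb into an on-stack scatterlist for a
 * crypto caller. MAX_SKB_FRAGS + 1 entries suffice only when there is
 * no frag_list, so the sketch refuses that case. example_map_skb()
 * is hypothetical.
 */
static int example_map_skb(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nelt;

	if (skb_shinfo(skb)->frag_list)
		return -EINVAL;		/* keep the bound simple */

	nelt = skb_to_sgvec(skb, sg, 0, skb->len);
	/* ... hand @sg with @nelt entries to the crypto layer ... */
	return nelt;
}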

/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery: there is a frag_list, so we have to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* This fragment cannot be used in place; copy it. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
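
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * make an skb writable and append @padlen trailer bytes, in the style
 * of the IPsec ESP output path. The open-coded length fixup mirrors
 * what a pskb_put()-style helper would do; example_add_trailer() is
 * hypothetical.
 */
static int example_add_trailer(struct sk_buff *skb, int padlen)
{
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, padlen, &trailer);
	if (nsg < 0)
		return nsg;

	/* @trailer is either @skb itself or the last skb on its
	 * frag_list; growing it must also grow the parent's totals. */
	memset(skb_put(trailer, padlen), 0, padlen);
	if (trailer != skb) {
		skb->len += padlen;
		skb->data_len += padlen;
	}
	return nsg;	/* scatterlist elements needed for the skb */
}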

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);
EXPORT_SYMBOL_GPL(skb_to_sgvec);
EXPORT_SYMBOL_GPL(skb_cow_data);