/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
        unsigned int quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;         /* zero trailing bytes */
        *p++ = cpu_to_be32(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
        unsigned int len;

        if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
        if (likely(nbytes != 0)) {
                unsigned int quadlen = XDR_QUADLEN(nbytes);
                unsigned int padding = (quadlen << 2) - nbytes;

                if (ptr != NULL)
                        memcpy(p, ptr, nbytes);
                if (padding != 0)
                        memset((char *)p + nbytes, 0, padding);
                p += quadlen;
        }
        return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
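
/*
 * Example (illustrative sketch, not part of the build): encoding five
 * opaque bytes consumes two XDR words, with three zero pad bytes
 * appended to reach the next 32-bit boundary. The buffer and byte
 * values below are hypothetical.
 */
#if 0
static void example_encode_opaque_fixed(void)
{
        __be32 buf[4];
        const u8 data[5] = { 0xde, 0xad, 0xbe, 0xef, 0x42 };
        __be32 *p = buf;

        /* XDR_QUADLEN(5) == 2, so p advances by two words (8 bytes) */
        p = xdr_encode_opaque_fixed(p, data, sizeof(data));
        /* bytes 5..7 of buf now hold the zero padding */
}
#endif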

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
        *p++ = cpu_to_be32(nbytes);
        return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
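
/*
 * Example (illustrative sketch, not part of the build): a string is
 * encoded as a 4-byte length word followed by the bytes plus padding.
 * The buffer sizing here is hypothetical.
 */
#if 0
static void example_encode_string(void)
{
        __be32 buf[8];
        __be32 *p;

        /* emits length 5, "hello", then 3 pad bytes: 12 bytes total */
        p = xdr_encode_string(buf, "hello");
}
#endif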

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
                          unsigned int *lenp, unsigned int maxlen)
{
        u32 len;

        len = be32_to_cpu(*p++);
        if (len > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
        char *kaddr;

        kaddr = kmap_atomic(buf->pages[0]);
        kaddr[buf->page_base + len] = '\0';
        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct kvec *head = xdr->head;
        struct kvec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;

        xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
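
/*
 * Example (illustrative sketch, not part of the build): splicing a page
 * vector into an xdr_buf after 'hdrlen' bytes of head data, as a
 * page-carrying reply might do. The names and parameters here are
 * hypothetical.
 */
#if 0
static void example_inline_pages(struct xdr_buf *xdr, unsigned int hdrlen,
                                 struct page **pages, unsigned int count)
{
        /*
         * The head keeps its first hdrlen bytes; whatever followed them
         * becomes the tail, and 'count' bytes of page data sit between.
         */
        xdr_inline_pages(xdr, hdrlen, pages, 0, count);
}
#endif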

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *            they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                        size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgto_base <= pgfrom_base);

        pgto_base += len;
        pgfrom_base += len;

        pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

        pgto_base &= ~PAGE_CACHE_MASK;
        pgfrom_base &= ~PAGE_CACHE_MASK;

        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_CACHE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_CACHE_SIZE;
                        pgfrom--;
                }

                copy = len;
                if (copy > pgto_base)
                        copy = pgto_base;
                if (copy > pgfrom_base)
                        copy = pgfrom_base;
                pgto_base -= copy;
                pgfrom_base -= copy;

                vto = kmap_atomic(*pgto);
                vfrom = kmap_atomic(*pgfrom);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vfrom);
                kunmap_atomic(vto);

        } while ((len -= copy) != 0);
}

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
        struct page **pgto;
        char *vto;
        size_t copy;

        pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        for (;;) {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vto = kmap_atomic(*pgto);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto);

                len -= copy;
                if (len == 0)
                        break;

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        flush_dcache_page(*pgto);
                        pgbase = 0;
                        pgto++;
                }
                p += copy;
        }
        flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
        struct page **pgfrom;
        char *vfrom;
        size_t copy;

        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vfrom = kmap_atomic(*pgfrom);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
        struct kvec *head, *tail;
        size_t copy, offs;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        head = buf->head;
        BUG_ON(len > head->iov_len);

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove((char *)tail->iov_base + len,
                                tail->iov_base, copy);
                }
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > pglen)
                        copy = pglen;
                offs = len - copy;
                if (offs >= tail->iov_len)
                        copy = 0;
                else if (copy > tail->iov_len - offs)
                        copy = tail->iov_len - offs;
                if (copy != 0)
                        _copy_from_pages((char *)tail->iov_base + offs,
                                         buf->pages,
                                         buf->page_base + pglen + offs - len,
                                         copy);
                /* Do we also need to copy data from the head into the tail ? */
                if (len > pglen) {
                        offs = copy = len - pglen;
                        if (copy > tail->iov_len)
                                copy = tail->iov_len;
                        memcpy(tail->iov_base,
                               (char *)head->iov_base +
                               head->iov_len - offs,
                               copy);
                }
        }
        /* Now handle pages */
        if (pglen != 0) {
                if (pglen > len)
                        _shift_data_right_pages(buf->pages,
                                                buf->page_base + len,
                                                buf->page_base,
                                                pglen - len);
                copy = len;
                if (len > pglen)
                        copy = pglen;
                _copy_to_pages(buf->pages, buf->page_base,
                               (char *)head->iov_base + head->iov_len - len,
                               copy);
        }
        head->iov_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
        struct kvec *tail;
        size_t copy;
        unsigned int pglen = buf->page_len;
        unsigned int tailbuf_len;

        tail = buf->tail;
        BUG_ON(len > pglen);

        tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

        /* Shift the tail first */
        if (tailbuf_len != 0) {
                unsigned int free_space = tailbuf_len - tail->iov_len;

                if (len < free_space)
                        free_space = len;
                tail->iov_len += free_space;

                copy = len;
                if (tail->iov_len > len) {
                        char *p = (char *)tail->iov_base + len;
                        memmove(p, tail->iov_base, tail->iov_len - len);
                } else
                        copy = tail->iov_len;
                /* Copy from the inlined pages into the tail */
                _copy_from_pages((char *)tail->iov_base,
                                 buf->pages, buf->page_base + pglen - len,
                                 copy);
        }
        buf->page_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
        return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        struct kvec *iov = buf->head;
        int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

        BUG_ON(scratch_len < 0);
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
        xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
        BUG_ON(iov->iov_len > scratch_len);

        if (p != xdr->p && p != NULL) {
                size_t len;

                BUG_ON(p < xdr->p || p > xdr->end);
                len = (char *)p - (char *)xdr->p;
                xdr->p = p;
                buf->len += len;
                iov->iov_len += len;
        }
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
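
/*
 * Example (illustrative sketch, not part of the build): if the caller
 * has already written words into the head with a raw pointer, passing
 * that pointer as @p lets xdr_init_encode() account for them. The
 * 'already_written' setup is hypothetical.
 */
#if 0
static void example_init_encode(struct xdr_buf *buf, __be32 *already_written)
{
        struct xdr_stream xdr;

        /*
         * The stream advances to 'already_written' and grows buf->len
         * and the head kvec length by the bytes skipped over.
         */
        xdr_init_encode(&xdr, buf, already_written);
}
#endif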

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q;

        /* align nbytes on the next 32-bit boundary */
        nbytes += 3;
        nbytes &= ~3;
        q = p + (nbytes >> 2);
        if (unlikely(q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->iov->iov_len += nbytes;
        xdr->buf->len += nbytes;
        return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
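
/*
 * Example (illustrative sketch, not part of the build): reserving room
 * for one word and bailing out when the scratch area is exhausted. The
 * value being encoded is hypothetical.
 */
#if 0
static int example_reserve(struct xdr_stream *xdr, u32 value)
{
        __be32 *p = xdr_reserve_space(xdr, 4);

        if (unlikely(p == NULL))
                return -EMSGSIZE;       /* no room left before xdr->end */
        *p = cpu_to_be32(value);
        return 0;
}
#endif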

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
                     unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov = buf->tail;

        buf->pages = pages;
        buf->page_base = base;
        buf->page_len = len;

        iov->iov_base = (char *)xdr->p;
        iov->iov_len = 0;
        xdr->iov = iov;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                BUG_ON(xdr->p >= xdr->end);
                iov->iov_base = (char *)xdr->p + (len & 3);
                iov->iov_len += pad;
                len += pad;
                *xdr->p++ = 0;
        }
        buf->buflen += len;
        buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
                        unsigned int len)
{
        if (len > iov->iov_len)
                len = iov->iov_len;
        xdr->p = (__be32*)iov->iov_base;
        xdr->end = (__be32*)(iov->iov_base + len);
        xdr->iov = iov;
        xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
                             unsigned int base, unsigned int len)
{
        unsigned int pgnr;
        unsigned int maxlen;
        unsigned int pgoff;
        unsigned int pgend;
        void *kaddr;

        maxlen = xdr->buf->page_len;
        if (base >= maxlen)
                return -EINVAL;
        maxlen -= base;
        if (len > maxlen)
                len = maxlen;

        base += xdr->buf->page_base;

        pgnr = base >> PAGE_SHIFT;
        xdr->page_ptr = &xdr->buf->pages[pgnr];
        kaddr = page_address(*xdr->page_ptr);

        pgoff = base & ~PAGE_MASK;
        xdr->p = (__be32*)(kaddr + pgoff);

        pgend = pgoff + len;
        if (pgend > PAGE_SIZE)
                pgend = PAGE_SIZE;
        xdr->end = (__be32*)(kaddr + pgend);
        xdr->iov = NULL;
        return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
        unsigned int newbase;

        newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
        newbase -= xdr->buf->page_base;

        if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
                xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
        if (xdr->page_ptr != NULL)
                xdr_set_next_page(xdr);
        else if (xdr->iov == xdr->buf->head) {
                if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
                        xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
        }
        return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        xdr->buf = buf;
        xdr->scratch.iov_base = NULL;
        xdr->scratch.iov_len = 0;
        xdr->nwords = XDR_QUADLEN(buf->len);
        if (buf->head[0].iov_len != 0)
                xdr_set_iov(xdr, buf->head, buf->len);
        else if (buf->page_len != 0)
                xdr_set_page_base(xdr, 0, buf->len);
        if (p != NULL && p > xdr->p && xdr->end >= p) {
                xdr->nwords -= p - xdr->p;
                xdr->p = p;
        }
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
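
/*
 * Example (illustrative sketch, not part of the build): typical decode
 * setup for a message that starts in the head kvec. 'buf' is assumed
 * to be a fully received xdr_buf; the word being read is hypothetical.
 */
#if 0
static int example_init_decode(struct xdr_buf *buf)
{
        struct xdr_stream xdr;
        __be32 *p;

        xdr_init_decode(&xdr, buf, buf->head[0].iov_base);
        p = xdr_inline_decode(&xdr, 4);
        if (p == NULL)
                return -EIO;            /* message shorter than one word */
        return be32_to_cpup(p);
}
#endif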

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
                           struct page **pages, unsigned int len)
{
        memset(buf, 0, sizeof(*buf));
        buf->pages = pages;
        buf->page_len = len;
        buf->buflen = len;
        buf->len = len;
        xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        unsigned int nwords = XDR_QUADLEN(nbytes);
        __be32 *p = xdr->p;
        __be32 *q = p + nwords;

        if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->nwords -= nwords;
        return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
        xdr->scratch.iov_base = buf;
        xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
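
/*
 * Example (illustrative sketch, not part of the build): attaching a
 * scratch page so that xdr_inline_decode() can linearize items that
 * straddle a page boundary. The allocation strategy is hypothetical;
 * the caller would free the page when decoding is done.
 */
#if 0
static int example_scratch(struct xdr_stream *xdr)
{
        void *page = (void *)__get_free_page(GFP_KERNEL);

        if (page == NULL)
                return -ENOMEM;
        xdr_set_scratch_buffer(xdr, page, PAGE_SIZE);
        return 0;
}
#endif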

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;
        void *cpdest = xdr->scratch.iov_base;
        size_t cplen = (char *)xdr->end - (char *)xdr->p;

        if (nbytes > xdr->scratch.iov_len)
                return NULL;
        memcpy(cpdest, xdr->p, cplen);
        cpdest += cplen;
        nbytes -= cplen;
        if (!xdr_set_next_buffer(xdr))
                return NULL;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p == NULL)
                return NULL;
        memcpy(cpdest, p, nbytes);
        return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so, return the current pointer and update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;

        if (nbytes == 0)
                return xdr->p;
        if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
                return NULL;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p != NULL)
                return p;
        return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
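
/*
 * Example (illustrative sketch, not part of the build): decoding a
 * length-prefixed opaque. A NULL return means the buffer is too short
 * or the item will not fit in the scratch buffer.
 */
#if 0
static int example_inline_decode(struct xdr_stream *xdr)
{
        __be32 *p = xdr_inline_decode(xdr, 4);
        u32 len;

        if (p == NULL)
                return -EIO;
        len = be32_to_cpup(p);
        p = xdr_inline_decode(xdr, len);        /* the opaque body */
        if (p == NULL)
                return -EIO;
        return 0;
}
#endif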

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        unsigned int nwords = XDR_QUADLEN(len);
        unsigned int cur = xdr_stream_pos(xdr);

        if (xdr->nwords == 0)
                return 0;
        if (nwords > xdr->nwords) {
                nwords = xdr->nwords;
                len = nwords << 2;
        }
        /* Realign pages to current pointer position */
        iov = buf->head;
        if (iov->iov_len > cur)
                xdr_shrink_bufhead(buf, iov->iov_len - cur);

        /* Truncate page data and move it into the tail */
        if (buf->page_len > len)
                xdr_shrink_pagelen(buf, buf->page_len - len);
        xdr->nwords = XDR_QUADLEN(buf->len - cur);
        return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        unsigned int nwords;
        unsigned int end;
        unsigned int padding;

        len = xdr_align_pages(xdr, len);
        if (len == 0)
                return 0;
        nwords = XDR_QUADLEN(len);
        padding = (nwords << 2) - len;
        xdr->iov = iov = buf->tail;
        /* Compute remaining message length. */
        end = ((xdr->nwords - nwords) << 2) + padding;
        if (end > iov->iov_len)
                end = iov->iov_len;
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        xdr->p = (__be32 *)((char *)iov->iov_base + padding);
        xdr->end = (__be32 *)((char *)iov->iov_base + end);
        xdr->page_ptr = NULL;
        xdr->nwords = XDR_QUADLEN(end - padding);
        return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
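
/*
 * Example (illustrative sketch, not part of the build): a decoder for a
 * reply carrying bulk page data would align that data into the page
 * list before resuming word-by-word decode in the tail. 'count' is
 * hypothetical.
 */
#if 0
static void example_read_pages(struct xdr_stream *xdr, u32 count)
{
        /* page data now covers 'count' bytes; xdr->p points at the tail */
        xdr_read_pages(xdr, count);
}
#endif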

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
        len = xdr_align_pages(xdr, len);
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        if (len != 0)
                xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
        buf->head[0] = *iov;
        buf->tail[0] = empty_iov;
        buf->page_len = 0;
        buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                   unsigned int base, unsigned int len)
{
        subbuf->buflen = subbuf->len = len;
        if (base < buf->head[0].iov_len) {
                subbuf->head[0].iov_base = buf->head[0].iov_base + base;
                subbuf->head[0].iov_len = min_t(unsigned int, len,
                                                buf->head[0].iov_len - base);
                len -= subbuf->head[0].iov_len;
                base = 0;
        } else {
                subbuf->head[0].iov_base = NULL;
                subbuf->head[0].iov_len = 0;
                base -= buf->head[0].iov_len;
        }

        if (base < buf->page_len) {
                subbuf->page_len = min(buf->page_len - base, len);
                base += buf->page_base;
                subbuf->page_base = base & ~PAGE_CACHE_MASK;
                subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
                base -= buf->page_len;
                subbuf->page_len = 0;
        }

        if (base < buf->tail[0].iov_len) {
                subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
                subbuf->tail[0].iov_len = min_t(unsigned int, len,
                                                buf->tail[0].iov_len - base);
                len -= subbuf->tail[0].iov_len;
                base = 0;
        } else {
                subbuf->tail[0].iov_base = NULL;
                subbuf->tail[0].iov_len = 0;
                base -= buf->tail[0].iov_len;
        }

        if (base || len)
                return -1;
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
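
/*
 * Example (illustrative sketch, not part of the build): carving out a
 * sub-buffer covering bytes [base, base + len) of a larger buffer,
 * e.g. to process just the body of a message. The offsets are
 * hypothetical.
 */
#if 0
static int example_subsegment(struct xdr_buf *buf)
{
        struct xdr_buf body;

        /* skip a hypothetical 24-byte header, take the next 100 bytes */
        if (xdr_buf_subsegment(buf, &body, 24, 100) < 0)
                return -EINVAL;
        /* body now aliases buf's head/pages/tail; nothing was copied */
        return 0;
}
#endif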

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(obj, subbuf->head[0].iov_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __read_bytes_from_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(subbuf->head[0].iov_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __write_bytes_to_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
        __be32 raw;
        int status;

        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
        if (status)
                return status;
        *obj = be32_to_cpu(raw);
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
        __be32 raw = cpu_to_be32(obj);

        return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
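
/*
 * Example (illustrative sketch, not part of the build): reading and
 * then patching a 32-bit word at a known offset, as one might do when
 * fixing up a count field after the fact. The offset and arithmetic
 * are hypothetical.
 */
#if 0
static int example_word(struct xdr_buf *buf)
{
        u32 val;
        int err;

        err = xdr_decode_word(buf, 0, &val);
        if (err)
                return err;
        return xdr_encode_word(buf, 0, val + 1);
}
#endif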

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
        struct xdr_buf subbuf;

        if (xdr_decode_word(buf, offset, &obj->len))
                return -EFAULT;
        if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
                return -EFAULT;

        /* Is the obj contained entirely in the head? */
        obj->data = subbuf.head[0].iov_base;
        if (subbuf.head[0].iov_len == obj->len)
                return 0;
        /* ..or is the obj contained entirely in the tail? */
        obj->data = subbuf.tail[0].iov_base;
        if (subbuf.tail[0].iov_len == obj->len)
                return 0;

        /* use end of tail as storage for obj:
         * (We don't copy to the beginning because then we'd have
         * to worry about doing a potentially overlapping copy.
         * This assumes the object is at most half the length of the
         * tail.) */
        if (obj->len > buf->buflen - buf->len)
                return -ENOMEM;
        if (buf->tail[0].iov_len != 0)
                obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
        else
                obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
        __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                 struct xdr_array2_desc *desc, int encode)
{
        char *elem = NULL, *c;
        unsigned int copied = 0, todo, avail_here;
        struct page **ppages = NULL;
        int err;

        if (encode) {
                if (xdr_encode_word(buf, base, desc->array_len) != 0)
                        return -EINVAL;
        } else {
                if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
                    desc->array_len > desc->array_maxlen ||
                    (unsigned long) base + 4 + desc->array_len *
                                    desc->elem_size > buf->len)
                        return -EINVAL;
        }
        base += 4;

        if (!desc->xcode)
                return 0;

        todo = desc->array_len * desc->elem_size;

        /* process head */
        if (todo && base < buf->head->iov_len) {
                c = buf->head->iov_base + base;
                avail_here = min_t(unsigned int, todo,
                                   buf->head->iov_len - base);
                todo -= avail_here;

                while (avail_here >= desc->elem_size) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        avail_here -= desc->elem_size;
                }
                if (avail_here) {
                        if (!elem) {
                                elem = kmalloc(desc->elem_size, GFP_KERNEL);
                                err = -ENOMEM;
                                if (!elem)
                                        goto out;
                        }
                        if (encode) {
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                                memcpy(c, elem, avail_here);
                        } else
                                memcpy(elem, c, avail_here);
                        copied = avail_here;
                }
                base = buf->head->iov_len;  /* align to start of pages */
        }

        /* process pages array */
        base -= buf->head->iov_len;
        if (todo && base < buf->page_len) {
                unsigned int avail_page;

                avail_here = min(todo, buf->page_len - base);
                todo -= avail_here;

                base += buf->page_base;
                ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
                base &= ~PAGE_CACHE_MASK;
                avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
                                   avail_here);
                c = kmap(*ppages) + base;

                while (avail_here) {
                        avail_here -= avail_page;
                        if (copied || avail_page < desc->elem_size) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                                avail_page -= l;
                                c += l;
                        }
                        while (avail_page >= desc->elem_size) {
                                err = desc->xcode(desc, c);
                                if (err)
                                        goto out;
                                c += desc->elem_size;
                                avail_page -= desc->elem_size;
                        }
                        if (avail_page) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                        }
                        if (avail_here) {
                                kunmap(*ppages);
                                ppages++;
                                c = kmap(*ppages);
                        }

                        avail_page = min(avail_here,
                                         (unsigned int) PAGE_CACHE_SIZE);
                }
                base = buf->page_len;  /* align to start of tail */
        }

        /* process tail */
        base -= buf->page_len;
        if (todo) {
                c = buf->tail->iov_base + base;
                if (copied) {
                        unsigned int l = desc->elem_size - copied;

                        if (encode)
                                memcpy(c, elem + copied, l);
                        else {
                                memcpy(elem + copied, c, l);
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                        }
                        todo -= l;
                        c += l;
                }
                while (todo) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        todo -= desc->elem_size;
                }
        }
        err = 0;

out:
        kfree(elem);
        if (ppages)
                kunmap(*ppages);
        return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if (base >= buf->len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
            buf->head->iov_len + buf->page_len + buf->tail->iov_len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
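
/*
 * Example (illustrative sketch, not part of the build): decoding a
 * counted array of fixed-size elements with a per-element callback.
 * The element layout, limits, and validation rule shown here are
 * hypothetical.
 */
#if 0
static int example_elem(struct xdr_array2_desc *desc, void *elem)
{
        u32 v = be32_to_cpup((__be32 *)elem);

        return v == 0 ? -EINVAL : 0;    /* reject zero-valued entries */
}

static int example_array2(struct xdr_buf *buf, unsigned int base)
{
        struct xdr_array2_desc desc = {
                .elem_size    = 4,
                .array_maxlen = 64,
                .xcode        = example_elem,
        };

        /* reads the 4-byte count at 'base', then each element in turn */
        return xdr_decode_array2(buf, base, &desc);
}
#endif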

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                int (*actor)(struct scatterlist *, void *), void *data)
{
        int i, ret = 0;
        unsigned int page_len, thislen, page_offset;
        struct scatterlist sg[1];

        sg_init_table(sg, 1);

        if (offset >= buf->head[0].iov_len) {
                offset -= buf->head[0].iov_len;
        } else {
                thislen = buf->head[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                if (ret)
                        goto out;
                offset = 0;
                len -= thislen;
        }
        if (len == 0)
                goto out;

        if (offset >= buf->page_len) {
                offset -= buf->page_len;
        } else {
                page_len = buf->page_len - offset;
                if (page_len > len)
                        page_len = len;
                len -= page_len;
                page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
                i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
                thislen = PAGE_CACHE_SIZE - page_offset;
                do {
                        if (thislen > page_len)
                                thislen = page_len;
                        sg_set_page(sg, buf->pages[i], thislen, page_offset);
                        ret = actor(sg, data);
                        if (ret)
                                goto out;
                        page_len -= thislen;
                        i++;
                        page_offset = 0;
                        thislen = PAGE_CACHE_SIZE;
                } while (page_len != 0);
                offset = 0;
        }
        if (len == 0)
                goto out;
        if (offset < buf->tail[0].iov_len) {
                thislen = buf->tail[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                len -= thislen;
        }
        if (len != 0)
                ret = -EINVAL;
out:
        return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
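
/*
 * Example (illustrative sketch, not part of the build): an actor that
 * just counts the bytes it is shown. A real caller, such as a digest
 * routine, would feed each scatterlist entry to a transform instead.
 */
#if 0
static int example_count_actor(struct scatterlist *sg, void *data)
{
        unsigned int *total = data;

        *total += sg->length;
        return 0;
}

static int example_process(struct xdr_buf *buf)
{
        unsigned int total = 0;

        /* walks head, pages, and tail in order, one sg entry at a time */
        return xdr_process_buf(buf, 0, buf->len, example_count_actor, &total);
}
#endif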