/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
        unsigned int quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;         /* zero trailing bytes */
        *p++ = cpu_to_be32(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
        unsigned int len;

        if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
        if (likely(nbytes != 0)) {
                unsigned int quadlen = XDR_QUADLEN(nbytes);
                unsigned int padding = (quadlen << 2) - nbytes;

                if (ptr != NULL)
                        memcpy(p, ptr, nbytes);
                if (padding != 0)
                        memset((char *)p + nbytes, 0, padding);
                p += quadlen;
        }
        return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
        *p++ = cpu_to_be32(nbytes);
        return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
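
/*
 * Example (illustrative sketch, not part of the original file): encoding an
 * 8-byte verifier followed by a variable-length opaque.  The names "verf",
 * "data" and "datalen" are assumptions made up for this example.
 *
 *	p = xdr_encode_opaque_fixed(p, verf, 8);
 *	p = xdr_encode_opaque(p, data, datalen);
 *
 * The fixed variant copies exactly the given bytes plus zero padding up to
 * the next 32-bit boundary; the variable-length variant additionally emits
 * the big-endian length word in front of the data.
 */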

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
                          unsigned int *lenp, unsigned int maxlen)
{
        u32 len;

        len = be32_to_cpu(*p++);
        if (len > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
        char *kaddr;

        kaddr = kmap_atomic(buf->pages[0]);
        kaddr[buf->page_base + len] = '\0';
        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct kvec *head = xdr->head;
        struct kvec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;

        xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
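
/*
 * Example (illustrative sketch, not part of the original file): splicing a
 * caller-provided page vector into an xdr_buf so that the payload lands in
 * the pages while any trailing XDR data goes to the tail.  "rcvbuf",
 * "hdrsize" and "args" are assumed names used only for this example.
 *
 *	xdr_inline_pages(rcvbuf, hdrsize, args->pages, args->pgbase, args->count);
 */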

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *       they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                        size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgto_base <= pgfrom_base);

        pgto_base += len;
        pgfrom_base += len;

        pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

        pgto_base &= ~PAGE_CACHE_MASK;
        pgfrom_base &= ~PAGE_CACHE_MASK;

        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_CACHE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_CACHE_SIZE;
                        pgfrom--;
                }

                copy = len;
                if (copy > pgto_base)
                        copy = pgto_base;
                if (copy > pgfrom_base)
                        copy = pgfrom_base;
                pgto_base -= copy;
                pgfrom_base -= copy;

                vto = kmap_atomic(*pgto);
                vfrom = kmap_atomic(*pgfrom);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vfrom);
                kunmap_atomic(vto);

        } while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
        struct page **pgto;
        char *vto;
        size_t copy;

        pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        for (;;) {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vto = kmap_atomic(*pgto);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto);

                len -= copy;
                if (len == 0)
                        break;

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        flush_dcache_page(*pgto);
                        pgbase = 0;
                        pgto++;
                }
                p += copy;
        }
        flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
        struct page **pgfrom;
        char *vfrom;
        size_t copy;

        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vfrom = kmap_atomic(*pgfrom);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
        struct kvec *head, *tail;
        size_t copy, offs;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        head = buf->head;

        WARN_ON_ONCE(len > head->iov_len);
        if (len > head->iov_len)
                len = head->iov_len;

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove((char *)tail->iov_base + len,
                                tail->iov_base, copy);
                }
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > pglen)
                        copy = pglen;
                offs = len - copy;
                if (offs >= tail->iov_len)
                        copy = 0;
                else if (copy > tail->iov_len - offs)
                        copy = tail->iov_len - offs;
                if (copy != 0)
                        _copy_from_pages((char *)tail->iov_base + offs,
                                         buf->pages,
                                         buf->page_base + pglen + offs - len,
                                         copy);
                /* Do we also need to copy data from the head into the tail ? */
                if (len > pglen) {
                        offs = copy = len - pglen;
                        if (copy > tail->iov_len)
                                copy = tail->iov_len;
                        memcpy(tail->iov_base,
                               (char *)head->iov_base +
                               head->iov_len - offs,
                               copy);
                }
        }
        /* Now handle pages */
        if (pglen != 0) {
                if (pglen > len)
                        _shift_data_right_pages(buf->pages,
                                                buf->page_base + len,
                                                buf->page_base,
                                                pglen - len);
                copy = len;
                if (len > pglen)
                        copy = pglen;
                _copy_to_pages(buf->pages, buf->page_base,
                               (char *)head->iov_base + head->iov_len - len,
                               copy);
        }
        head->iov_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
        struct kvec *tail;
        size_t copy;
        unsigned int pglen = buf->page_len;
        unsigned int tailbuf_len;

        tail = buf->tail;
        BUG_ON(len > pglen);

        tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

        /* Shift the tail first */
        if (tailbuf_len != 0) {
                unsigned int free_space = tailbuf_len - tail->iov_len;

                if (len < free_space)
                        free_space = len;
                tail->iov_len += free_space;

                copy = len;
                if (tail->iov_len > len) {
                        char *p = (char *)tail->iov_base + len;
                        memmove(p, tail->iov_base, tail->iov_len - len);
                } else
                        copy = tail->iov_len;
                /* Copy from the inlined pages into the tail */
                _copy_from_pages((char *)tail->iov_base,
                                 buf->pages, buf->page_base + pglen - len,
                                 copy);
        }
        buf->page_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
        return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        struct kvec *iov = buf->head;
        int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

        BUG_ON(scratch_len < 0);
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
        xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
        BUG_ON(iov->iov_len > scratch_len);

        if (p != xdr->p && p != NULL) {
                size_t len;

                BUG_ON(p < xdr->p || p > xdr->end);
                len = (char *)p - (char *)xdr->p;
                xdr->p = p;
                buf->len += len;
                iov->iov_len += len;
        }
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q;

        /* align nbytes on the next 32-bit boundary */
        nbytes += 3;
        nbytes &= ~3;
        q = p + (nbytes >> 2);
        if (unlikely(q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->iov->iov_len += nbytes;
        xdr->buf->len += nbytes;
        return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
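
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * encode sequence as used by RPC encode routines.  "buf", "p", "fh" and
 * "fhlen" are assumed names, not defined in this file.
 *
 *	struct xdr_stream xdr;
 *
 *	xdr_init_encode(&xdr, buf, p);
 *	p = xdr_reserve_space(&xdr, 4 + (XDR_QUADLEN(fhlen) << 2));
 *	if (p == NULL)
 *		return -EMSGSIZE;	// not enough room left in the head
 *	xdr_encode_opaque(p, fh, fhlen);
 */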

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
                     unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov = buf->tail;

        buf->pages = pages;
        buf->page_base = base;
        buf->page_len = len;

        iov->iov_base = (char *)xdr->p;
        iov->iov_len = 0;
        xdr->iov = iov;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                BUG_ON(xdr->p >= xdr->end);
                iov->iov_base = (char *)xdr->p + (len & 3);
                iov->iov_len += pad;
                len += pad;
                *xdr->p++ = 0;
        }
        buf->buflen += len;
        buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
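
/*
 * Example (illustrative sketch, not part of the original file): sending a
 * page-based payload after the inline header has been encoded.  "args" is
 * an assumed name used only for this example.
 *
 *	xdr_write_pages(&xdr, args->pages, args->pgbase, args->count);
 *
 * The tail kvec is positioned right after the encoded header, and a zero
 * pad word is emitted there when the payload length is not a multiple of
 * four.
 */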

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
                        unsigned int len)
{
        if (len > iov->iov_len)
                len = iov->iov_len;
        xdr->p = (__be32*)iov->iov_base;
        xdr->end = (__be32*)(iov->iov_base + len);
        xdr->iov = iov;
        xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
                             unsigned int base, unsigned int len)
{
        unsigned int pgnr;
        unsigned int maxlen;
        unsigned int pgoff;
        unsigned int pgend;
        void *kaddr;

        maxlen = xdr->buf->page_len;
        if (base >= maxlen)
                return -EINVAL;
        maxlen -= base;
        if (len > maxlen)
                len = maxlen;

        base += xdr->buf->page_base;

        pgnr = base >> PAGE_SHIFT;
        xdr->page_ptr = &xdr->buf->pages[pgnr];
        kaddr = page_address(*xdr->page_ptr);

        pgoff = base & ~PAGE_MASK;
        xdr->p = (__be32*)(kaddr + pgoff);

        pgend = pgoff + len;
        if (pgend > PAGE_SIZE)
                pgend = PAGE_SIZE;
        xdr->end = (__be32*)(kaddr + pgend);
        xdr->iov = NULL;
        return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
        unsigned int newbase;

        newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
        newbase -= xdr->buf->page_base;

        if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
                xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
        if (xdr->page_ptr != NULL)
                xdr_set_next_page(xdr);
        else if (xdr->iov == xdr->buf->head) {
                if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
                        xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
        }
        return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        xdr->buf = buf;
        xdr->scratch.iov_base = NULL;
        xdr->scratch.iov_len = 0;
        xdr->nwords = XDR_QUADLEN(buf->len);
        if (buf->head[0].iov_len != 0)
                xdr_set_iov(xdr, buf->head, buf->len);
        else if (buf->page_len != 0)
                xdr_set_page_base(xdr, 0, buf->len);
        if (p != NULL && p > xdr->p && xdr->end >= p) {
                xdr->nwords -= p - xdr->p;
                xdr->p = p;
        }
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
                           struct page **pages, unsigned int len)
{
        memset(buf, 0, sizeof(*buf));
        buf->pages = pages;
        buf->page_len = len;
        buf->buflen = len;
        buf->len = len;
        xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        unsigned int nwords = XDR_QUADLEN(nbytes);
        __be32 *p = xdr->p;
        __be32 *q = p + nwords;

        if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->nwords -= nwords;
        return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
        xdr->scratch.iov_base = buf;
        xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
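
/*
 * Example (illustrative sketch, not part of the original file): decoders
 * that read page-based data typically park a spare page as scratch space,
 * so that an object straddling a page boundary can still be returned as
 * one linear chunk.  "scratch_page" is an assumed name.
 *
 *	xdr_set_scratch_buffer(&xdr, page_address(scratch_page), PAGE_SIZE);
 */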

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;
        void *cpdest = xdr->scratch.iov_base;
        size_t cplen = (char *)xdr->end - (char *)xdr->p;

        if (nbytes > xdr->scratch.iov_len)
                return NULL;
        memcpy(cpdest, xdr->p, cplen);
        cpdest += cplen;
        nbytes -= cplen;
        if (!xdr_set_next_buffer(xdr))
                return NULL;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p == NULL)
                return NULL;
        memcpy(cpdest, p, nbytes);
        return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;

        if (nbytes == 0)
                return xdr->p;
        if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
                return NULL;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p != NULL)
                return p;
        return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
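
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * decode step pulls a fixed-size chunk and then reads fields out of it.
 * "xdr" is assumed to have been set up with xdr_init_decode(); "count" and
 * "eof" are made-up result fields.
 *
 *	__be32 *p = xdr_inline_decode(&xdr, 8);
 *	if (p == NULL)
 *		return -EIO;		// short or corrupt reply
 *	count = be32_to_cpup(p++);
 *	eof = be32_to_cpup(p);
 */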

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        unsigned int nwords = XDR_QUADLEN(len);
        unsigned int cur = xdr_stream_pos(xdr);

        if (xdr->nwords == 0)
                return 0;
        /* Realign pages to current pointer position */
        iov = buf->head;
        if (iov->iov_len > cur) {
                xdr_shrink_bufhead(buf, iov->iov_len - cur);
                xdr->nwords = XDR_QUADLEN(buf->len - cur);
        }

        if (nwords > xdr->nwords) {
                nwords = xdr->nwords;
                len = nwords << 2;
        }
        if (buf->page_len <= len)
                len = buf->page_len;
        else if (nwords < xdr->nwords) {
                /* Truncate page data and move it into the tail */
                xdr_shrink_pagelen(buf, buf->page_len - len);
                xdr->nwords = XDR_QUADLEN(buf->len - cur);
        }
        return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        unsigned int nwords;
        unsigned int end;
        unsigned int padding;

        len = xdr_align_pages(xdr, len);
        if (len == 0)
                return 0;
        nwords = XDR_QUADLEN(len);
        padding = (nwords << 2) - len;
        xdr->iov = iov = buf->tail;
        /* Compute remaining message length. */
        end = ((xdr->nwords - nwords) << 2) + padding;
        if (end > iov->iov_len)
                end = iov->iov_len;

        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        xdr->p = (__be32 *)((char *)iov->iov_base + padding);
        xdr->end = (__be32 *)((char *)iov->iov_base + end);
        xdr->page_ptr = NULL;
        xdr->nwords = XDR_QUADLEN(end - padding);
        return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
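
/*
 * Example (illustrative sketch, not part of the original file): after the
 * fixed part of a reply has been decoded, the opaque payload can be steered
 * into the page vector while decoding continues in the tail.  "recvd" and
 * "count" are assumed names; "count" stands for the payload length decoded
 * just before this call.
 *
 *	recvd = xdr_read_pages(&xdr, count);
 *	if (recvd < count)
 *		count = recvd;		// server returned fewer bytes
 */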

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
        len = xdr_align_pages(xdr, len);
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        if (len != 0)
                xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
        buf->head[0] = *iov;
        buf->tail[0] = empty_iov;
        buf->page_len = 0;
        buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                   unsigned int base, unsigned int len)
{
        subbuf->buflen = subbuf->len = len;
        if (base < buf->head[0].iov_len) {
                subbuf->head[0].iov_base = buf->head[0].iov_base + base;
                subbuf->head[0].iov_len = min_t(unsigned int, len,
                                                buf->head[0].iov_len - base);
                len -= subbuf->head[0].iov_len;
                base = 0;
        } else {
                subbuf->head[0].iov_base = NULL;
                subbuf->head[0].iov_len = 0;
                base -= buf->head[0].iov_len;
        }

        if (base < buf->page_len) {
                subbuf->page_len = min(buf->page_len - base, len);
                base += buf->page_base;
                subbuf->page_base = base & ~PAGE_CACHE_MASK;
                subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
                base -= buf->page_len;
                subbuf->page_len = 0;
        }

        if (base < buf->tail[0].iov_len) {
                subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
                subbuf->tail[0].iov_len = min_t(unsigned int, len,
                                                buf->tail[0].iov_len - base);
                len -= subbuf->tail[0].iov_len;
                base = 0;
        } else {
                subbuf->tail[0].iov_base = NULL;
                subbuf->tail[0].iov_len = 0;
                base -= buf->tail[0].iov_len;
        }

        if (base || len)
                return -1;
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
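
/*
 * Example (illustrative sketch, not part of the original file): carving a
 * sub-buffer out of a message so that only one region (say, the part that
 * has to be integrity-checked) is handed to another helper.  "offset" and
 * "region_len" are assumed names.
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, offset, region_len))
 *		return -EINVAL;		// region not contained in buf
 */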

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
        size_t cur;
        unsigned int trim = len;

        if (buf->tail[0].iov_len) {
                cur = min_t(size_t, buf->tail[0].iov_len, trim);
                buf->tail[0].iov_len -= cur;
                trim -= cur;
                if (!trim)
                        goto fix_len;
        }

        if (buf->page_len) {
                cur = min_t(unsigned int, buf->page_len, trim);
                buf->page_len -= cur;
                trim -= cur;
                if (!trim)
                        goto fix_len;
        }

        if (buf->head[0].iov_len) {
                cur = min_t(size_t, buf->head[0].iov_len, trim);
                buf->head[0].iov_len -= cur;
                trim -= cur;
        }
fix_len:
        buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(obj, subbuf->head[0].iov_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __read_bytes_from_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(subbuf->head[0].iov_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __write_bytes_to_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
        __be32 raw;
        int status;

        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
        if (status)
                return status;
        *obj = be32_to_cpu(raw);
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
        __be32 raw = cpu_to_be32(obj);

        return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
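
/*
 * Example (illustrative sketch, not part of the original file): reading and
 * rewriting a single 32-bit word at a byte offset within an xdr_buf, with
 * the byte-order conversion handled by the helpers above.  "offset" is an
 * assumed name.
 *
 *	u32 val;
 *
 *	if (xdr_decode_word(buf, offset, &val))
 *		return -EIO;
 *	if (xdr_encode_word(buf, offset, val + 1))
 *		return -EIO;
 */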

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
        struct xdr_buf subbuf;

        if (xdr_decode_word(buf, offset, &obj->len))
                return -EFAULT;
        if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
                return -EFAULT;

        /* Is the obj contained entirely in the head? */
        obj->data = subbuf.head[0].iov_base;
        if (subbuf.head[0].iov_len == obj->len)
                return 0;
        /* ..or is the obj contained entirely in the tail? */
        obj->data = subbuf.tail[0].iov_base;
        if (subbuf.tail[0].iov_len == obj->len)
                return 0;

        /* use end of tail as storage for obj:
         * (We don't copy to the beginning because then we'd have
         * to worry about doing a potentially overlapping copy.
         * This assumes the object is at most half the length of the
         * tail.) */
        if (obj->len > buf->buflen - buf->len)
                return -ENOMEM;
        if (buf->tail[0].iov_len != 0)
                obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
        else
                obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
        __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                 struct xdr_array2_desc *desc, int encode)
{
        char *elem = NULL, *c;
        unsigned int copied = 0, todo, avail_here;
        struct page **ppages = NULL;
        int err;

        if (encode) {
                if (xdr_encode_word(buf, base, desc->array_len) != 0)
                        return -EINVAL;
        } else {
                if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
                    desc->array_len > desc->array_maxlen ||
                    (unsigned long) base + 4 + desc->array_len *
                                    desc->elem_size > buf->len)
                        return -EINVAL;
        }
        base += 4;

        if (!desc->xcode)
                return 0;

        todo = desc->array_len * desc->elem_size;

        /* process head */
        if (todo && base < buf->head->iov_len) {
                c = buf->head->iov_base + base;
                avail_here = min_t(unsigned int, todo,
                                   buf->head->iov_len - base);
                todo -= avail_here;

                while (avail_here >= desc->elem_size) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        avail_here -= desc->elem_size;
                }
                if (avail_here) {
                        if (!elem) {
                                elem = kmalloc(desc->elem_size, GFP_KERNEL);
                                err = -ENOMEM;
                                if (!elem)
                                        goto out;
                        }
                        if (encode) {
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                                memcpy(c, elem, avail_here);
                        } else
                                memcpy(elem, c, avail_here);
                        copied = avail_here;
                }
                base = buf->head->iov_len;  /* align to start of pages */
        }

        /* process pages array */
        base -= buf->head->iov_len;
        if (todo && base < buf->page_len) {
                unsigned int avail_page;

                avail_here = min(todo, buf->page_len - base);
                todo -= avail_here;

                base += buf->page_base;
                ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
                base &= ~PAGE_CACHE_MASK;
                avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
                                   avail_here);
                c = kmap(*ppages) + base;

                while (avail_here) {
                        avail_here -= avail_page;
                        if (copied || avail_page < desc->elem_size) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                                avail_page -= l;
                                c += l;
                        }
                        while (avail_page >= desc->elem_size) {
                                err = desc->xcode(desc, c);
                                if (err)
                                        goto out;
                                c += desc->elem_size;
                                avail_page -= desc->elem_size;
                        }
                        if (avail_page) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                        }
                        if (avail_here) {
                                kunmap(*ppages);
                                ppages++;
                                c = kmap(*ppages);
                        }

                        avail_page = min(avail_here,
                                         (unsigned int) PAGE_CACHE_SIZE);
                }
                base = buf->page_len;  /* align to start of tail */
        }

        /* process tail */
        base -= buf->page_len;
        if (todo) {
                c = buf->tail->iov_base + base;
                if (copied) {
                        unsigned int l = desc->elem_size - copied;

                        if (encode)
                                memcpy(c, elem + copied, l);
                        else {
                                memcpy(elem + copied, c, l);
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                        }
                        todo -= l;
                        c += l;
                }
                while (todo) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        todo -= desc->elem_size;
                }
        }
        err = 0;

out:
        kfree(elem);
        if (ppages)
                kunmap(*ppages);
        return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if (base >= buf->len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
            buf->head->iov_len + buf->page_len + buf->tail->iov_len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                int (*actor)(struct scatterlist *, void *), void *data)
{
        int i, ret = 0;
        unsigned int page_len, thislen, page_offset;
        struct scatterlist sg[1];

        sg_init_table(sg, 1);

        if (offset >= buf->head[0].iov_len) {
                offset -= buf->head[0].iov_len;
        } else {
                thislen = buf->head[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                if (ret)
                        goto out;
                offset = 0;
                len -= thislen;
        }
        if (len == 0)
                goto out;

        if (offset >= buf->page_len) {
                offset -= buf->page_len;
        } else {
                page_len = buf->page_len - offset;
                if (page_len > len)
                        page_len = len;
                len -= page_len;
                page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
                i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
                thislen = PAGE_CACHE_SIZE - page_offset;
                do {
                        if (thislen > page_len)
                                thislen = page_len;
                        sg_set_page(sg, buf->pages[i], thislen, page_offset);
                        ret = actor(sg, data);
                        if (ret)
                                goto out;
                        page_len -= thislen;
                        i++;
                        page_offset = 0;
                        thislen = PAGE_CACHE_SIZE;
                } while (page_len != 0);
                offset = 0;
        }
        if (len == 0)
                goto out;
        if (offset < buf->tail[0].iov_len) {
                thislen = buf->tail[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                len -= thislen;
        }
        if (len != 0)
                ret = -EINVAL;
out:
        return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
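
/*
 * Example (illustrative sketch, not part of the original file):
 * xdr_process_buf() walks head, pages and tail as scatterlist entries and
 * hands each to an actor callback, which is one way to checksum a region of
 * an xdr_buf without first flattening it.  "checksum_actor", "struct my_ctx"
 * and "ctx" are assumed names invented for this sketch.
 *
 *	static int checksum_actor(struct scatterlist *sg, void *data)
 *	{
 *		struct my_ctx *ctx = data;	// hypothetical context type
 *
 *		// feed sg into whatever digest is being computed
 *		return 0;			// non-zero aborts the walk
 *	}
 *
 *	err = xdr_process_buf(buf, offset, len, checksum_actor, ctx);
 */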