/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
        unsigned int quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;         /* zero trailing bytes */
        *p++ = cpu_to_be32(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
        unsigned int len;

        if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
        if (likely(nbytes != 0)) {
                unsigned int quadlen = XDR_QUADLEN(nbytes);
                unsigned int padding = (quadlen << 2) - nbytes;

                if (ptr != NULL)
                        memcpy(p, ptr, nbytes);
                if (padding != 0)
                        memset((char *)p + nbytes, 0, padding);
                p += quadlen;
        }
        return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
        *p++ = cpu_to_be32(nbytes);
        return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
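
/*
 * Illustrative sketch (not part of the original file): encoding one
 * variable-length opaque item with the helpers above. The destination
 * word pointer "buf" and the 8-byte verifier "verf" are assumptions
 * made for the example.
 *
 *      __be32 *p = buf;
 *
 *      p = xdr_encode_opaque(p, verf, 8);
 *      // p now points just past the 12 encoded bytes: a 4-byte length
 *      // word plus 8 data bytes (already 32-bit aligned, so no pad).
 */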
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
                          unsigned int *lenp, unsigned int maxlen)
{
        u32 len;

        len = be32_to_cpu(*p++);
        if (len > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
        char *kaddr;

        kaddr = kmap_atomic(buf->pages[0]);
        kaddr[buf->page_base + len] = '\0';
        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
                 unsigned int len)
{
        struct kvec *tail = xdr->tail;
        u32 *p;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
        tail->iov_base = p;
        tail->iov_len = 0;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                *p = 0;
                tail->iov_base = (char *)p + (len & 3);
                tail->iov_len = pad;
                len += pad;
        }
        xdr->buflen += len;
        xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct kvec *head = xdr->head;
        struct kvec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;

        xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                        size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgto_base <= pgfrom_base);

        pgto_base += len;
        pgfrom_base += len;

        pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

        pgto_base &= ~PAGE_CACHE_MASK;
        pgfrom_base &= ~PAGE_CACHE_MASK;

        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_CACHE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_CACHE_SIZE;
                        pgfrom--;
                }

                copy = len;
                if (copy > pgto_base)
                        copy = pgto_base;
                if (copy > pgfrom_base)
                        copy = pgfrom_base;
                pgto_base -= copy;
                pgfrom_base -= copy;

                vto = kmap_atomic(*pgto);
                vfrom = kmap_atomic(*pgfrom);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vfrom);
                kunmap_atomic(vto);

        } while ((len -= copy) != 0);
}
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
        struct page **pgto;
        char *vto;
        size_t copy;

        pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        for (;;) {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vto = kmap_atomic(*pgto);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto);

                len -= copy;
                if (len == 0)
                        break;

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        flush_dcache_page(*pgto);
                        pgbase = 0;
                        pgto++;
                }
                p += copy;
        }
        flush_dcache_page(*pgto);
}
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
        struct page **pgfrom;
        char *vfrom;
        size_t copy;

        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vfrom = kmap_atomic(*pgfrom);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
        struct kvec *head, *tail;
        size_t copy, offs;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        head = buf->head;
        BUG_ON(len > head->iov_len);

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove((char *)tail->iov_base + len,
                                tail->iov_base, copy);
                }
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > pglen)
                        copy = pglen;
                offs = len - copy;
                if (offs >= tail->iov_len)
                        copy = 0;
                else if (copy > tail->iov_len - offs)
                        copy = tail->iov_len - offs;
                if (copy != 0)
                        _copy_from_pages((char *)tail->iov_base + offs,
                                         buf->pages,
                                         buf->page_base + pglen + offs - len,
                                         copy);
                /* Do we also need to copy data from the head into the tail ? */
                if (len > pglen) {
                        offs = copy = len - pglen;
                        if (copy > tail->iov_len)
                                copy = tail->iov_len;
                        memcpy(tail->iov_base,
                               (char *)head->iov_base +
                               head->iov_len - offs,
                               copy);
                }
        }
        /* Now handle pages */
        if (pglen != 0) {
                if (pglen > len)
                        _shift_data_right_pages(buf->pages,
                                                buf->page_base + len,
                                                buf->page_base,
                                                pglen - len);
                copy = len;
                if (len > pglen)
                        copy = pglen;
                _copy_to_pages(buf->pages, buf->page_base,
                               (char *)head->iov_base + head->iov_len - len,
                               copy);
        }
        head->iov_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
        struct kvec *tail;
        size_t copy;
        unsigned int pglen = buf->page_len;
        unsigned int tailbuf_len;

        tail = buf->tail;
        BUG_ON(len > pglen);

        tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

        /* Shift the tail first */
        if (tailbuf_len != 0) {
                unsigned int free_space = tailbuf_len - tail->iov_len;

                if (len < free_space)
                        free_space = len;
                tail->iov_len += free_space;

                copy = len;
                if (tail->iov_len > len) {
                        char *p = (char *)tail->iov_base + len;
                        memmove(p, tail->iov_base, tail->iov_len - len);
                } else
                        copy = tail->iov_len;
                /* Copy from the inlined pages into the tail */
                _copy_from_pages((char *)tail->iov_base,
                                 buf->pages, buf->page_base + pglen - len,
                                 copy);
        }
        buf->page_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
        return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        struct kvec *iov = buf->head;
        int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

        BUG_ON(scratch_len < 0);
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
        xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
        BUG_ON(iov->iov_len > scratch_len);

        if (p != xdr->p && p != NULL) {
                size_t len;

                BUG_ON(p < xdr->p || p > xdr->end);
                len = (char *)p - (char *)xdr->p;
                xdr->p = p;
                buf->len += len;
                iov->iov_len += len;
        }
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q;

        /* align nbytes on the next 32-bit boundary */
        nbytes += 3;
        nbytes &= ~3;
        q = p + (nbytes >> 2);
        if (unlikely(q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->iov->iov_len += nbytes;
        xdr->buf->len += nbytes;
        return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
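
/*
 * Illustrative sketch (not part of the original file): the typical encode
 * flow pairs xdr_init_encode() with xdr_reserve_space(). The xdr_buf "buf",
 * the 8-byte "verifier" and the error policy are assumptions made for the
 * example.
 *
 *      struct xdr_stream xdr;
 *      __be32 *p;
 *
 *      xdr_init_encode(&xdr, &buf, NULL);
 *      p = xdr_reserve_space(&xdr, 4 + 8);     // length word + 8 data bytes
 *      if (p == NULL)
 *              return -EMSGSIZE;               // caller-chosen error policy
 *      p = xdr_encode_opaque(p, verifier, 8);  // fills the reserved 12 bytes
 */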
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
                     unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov = buf->tail;

        buf->pages = pages;
        buf->page_base = base;
        buf->page_len = len;

        iov->iov_base = (char *)xdr->p;
        iov->iov_len = 0;
        xdr->iov = iov;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                BUG_ON(xdr->p >= xdr->end);
                iov->iov_base = (char *)xdr->p + (len & 3);
                iov->iov_len += pad;
                len += pad;
                *xdr->p++ = 0;
        }
        buf->buflen += len;
        buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
                        unsigned int len)
{
        if (len > iov->iov_len)
                len = iov->iov_len;
        xdr->p = (__be32*)iov->iov_base;
        xdr->end = (__be32*)(iov->iov_base + len);
        xdr->iov = iov;
        xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
                             unsigned int base, unsigned int len)
{
        unsigned int pgnr;
        unsigned int maxlen;
        unsigned int pgoff;
        unsigned int pgend;
        void *kaddr;

        maxlen = xdr->buf->page_len;
        if (base >= maxlen)
                return -EINVAL;
        maxlen -= base;
        if (len > maxlen)
                len = maxlen;

        base += xdr->buf->page_base;

        pgnr = base >> PAGE_SHIFT;
        xdr->page_ptr = &xdr->buf->pages[pgnr];
        kaddr = page_address(*xdr->page_ptr);

        pgoff = base & ~PAGE_MASK;
        xdr->p = (__be32*)(kaddr + pgoff);

        pgend = pgoff + len;
        if (pgend > PAGE_SIZE)
                pgend = PAGE_SIZE;
        xdr->end = (__be32*)(kaddr + pgend);
        xdr->iov = NULL;
        return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
        unsigned int newbase;

        newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
        newbase -= xdr->buf->page_base;

        if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
                xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
        if (xdr->page_ptr != NULL)
                xdr_set_next_page(xdr);
        else if (xdr->iov == xdr->buf->head) {
                if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
                        xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
        }
        return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        xdr->buf = buf;
        xdr->scratch.iov_base = NULL;
        xdr->scratch.iov_len = 0;
        xdr->nwords = XDR_QUADLEN(buf->len);
        if (buf->head[0].iov_len != 0)
                xdr_set_iov(xdr, buf->head, buf->len);
        else if (buf->page_len != 0)
                xdr_set_page_base(xdr, 0, buf->len);
        if (p != NULL && p > xdr->p && xdr->end >= p) {
                xdr->nwords -= p - xdr->p;
                xdr->p = p;
        }
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
                           struct page **pages, unsigned int len)
{
        memset(buf, 0, sizeof(*buf));
        buf->pages = pages;
        buf->page_len = len;
        buf->buflen = len;
        buf->len = len;
        xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        unsigned int nwords = XDR_QUADLEN(nbytes);
        __be32 *p = xdr->p;
        __be32 *q = p + nwords;

        if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->nwords -= nwords;
        return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
        xdr->scratch.iov_base = buf;
        xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
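
/*
 * Illustrative sketch (not part of the original file): a decoder that may
 * read items straddling a page boundary attaches a scratch buffer once,
 * right after initializing the stream. The PAGE_SIZE scratch page and the
 * GFP_NOFS allocation are assumptions made for the example.
 *
 *      void *scratch = (void *)__get_free_page(GFP_NOFS);
 *
 *      xdr_init_decode(&xdr, &buf, p);
 *      xdr_set_scratch_buffer(&xdr, scratch, PAGE_SIZE);
 *      // xdr_inline_decode() can now linearize items of up to PAGE_SIZE
 *      // bytes even when they cross a page boundary.
 */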
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;
        void *cpdest = xdr->scratch.iov_base;
        size_t cplen = (char *)xdr->end - (char *)xdr->p;

        if (nbytes > xdr->scratch.iov_len)
                return NULL;
        memcpy(cpdest, xdr->p, cplen);
        cpdest += cplen;
        nbytes -= cplen;
        if (!xdr_set_next_buffer(xdr))
                return NULL;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p == NULL)
                return NULL;
        memcpy(cpdest, p, nbytes);
        return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;

        if (nbytes == 0)
                return xdr->p;
        if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
                return NULL;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p != NULL)
                return p;
        return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
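
/*
 * Illustrative sketch (not part of the original file): the usual decode
 * pattern is xdr_init_decode() followed by xdr_inline_decode() for each
 * fixed-size chunk. The reply buffer "rcvbuf" and the error policy are
 * assumptions made for the example.
 *
 *      struct xdr_stream xdr;
 *      __be32 *p;
 *      u32 count;
 *
 *      xdr_init_decode(&xdr, rcvbuf, rcvbuf->head[0].iov_base);
 *      p = xdr_inline_decode(&xdr, 4);
 *      if (p == NULL)
 *              return -EIO;            // short or garbled reply
 *      count = be32_to_cpup(p);
 */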
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        unsigned int nwords = XDR_QUADLEN(len);
        unsigned int cur = xdr_stream_pos(xdr);
        unsigned int end;
        unsigned int padding;

        if (xdr->nwords == 0)
                return 0;
        if (nwords > xdr->nwords) {
                nwords = xdr->nwords;
                len = nwords << 2;
        }
        /* Realign pages to current pointer position */
        iov = buf->head;
        if (iov->iov_len > cur)
                xdr_shrink_bufhead(buf, iov->iov_len - cur);

        /* Truncate page data and move it into the tail */
        if (buf->page_len > len)
                xdr_shrink_pagelen(buf, buf->page_len - len);
        xdr->nwords = XDR_QUADLEN(buf->len - cur);
        padding = (nwords << 2) - len;
        xdr->iov = iov = buf->tail;
        /* Compute remaining message length. */
        end = ((xdr->nwords - nwords) << 2) + padding;
        if (end > iov->iov_len)
                end = iov->iov_len;

        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        xdr->p = (__be32 *)((char *)iov->iov_base + padding);
        xdr->end = (__be32 *)((char *)iov->iov_base + end);
        xdr->page_ptr = NULL;
        xdr->nwords = XDR_QUADLEN(end - padding);
        return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
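
/*
 * Illustrative sketch (not part of the original file): a READ-style reply
 * decoder typically decodes the byte count from the head, then calls
 * xdr_read_pages() so the payload lines up with the page list. The names
 * "count" and "recvd" are assumptions made for the example.
 *
 *      p = xdr_inline_decode(&xdr, 4);
 *      if (p == NULL)
 *              return -EIO;
 *      count = be32_to_cpup(p);
 *      recvd = xdr_read_pages(&xdr, count);
 *      if (recvd < count)
 *              count = recvd;          // server returned a short read
 */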
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
        len = xdr_read_pages(xdr, len);
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        xdr_set_page_base(xdr, 0, len);
        xdr->nwords += XDR_QUADLEN(xdr->buf->page_len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
        buf->head[0] = *iov;
        buf->tail[0] = empty_iov;
        buf->page_len = 0;
        buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                   unsigned int base, unsigned int len)
{
        subbuf->buflen = subbuf->len = len;
        if (base < buf->head[0].iov_len) {
                subbuf->head[0].iov_base = buf->head[0].iov_base + base;
                subbuf->head[0].iov_len = min_t(unsigned int, len,
                                                buf->head[0].iov_len - base);
                len -= subbuf->head[0].iov_len;
                base = 0;
        } else {
                subbuf->head[0].iov_base = NULL;
                subbuf->head[0].iov_len = 0;
                base -= buf->head[0].iov_len;
        }

        if (base < buf->page_len) {
                subbuf->page_len = min(buf->page_len - base, len);
                base += buf->page_base;
                subbuf->page_base = base & ~PAGE_CACHE_MASK;
                subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
                base -= buf->page_len;
                subbuf->page_len = 0;
        }

        if (base < buf->tail[0].iov_len) {
                subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
                subbuf->tail[0].iov_len = min_t(unsigned int, len,
                                                buf->tail[0].iov_len - base);
                len -= subbuf->tail[0].iov_len;
                base = 0;
        } else {
                subbuf->tail[0].iov_base = NULL;
                subbuf->tail[0].iov_len = 0;
                base -= buf->tail[0].iov_len;
        }

        if (base || len)
                return -1;
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
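
/*
 * Illustrative sketch (not part of the original file): carving out the
 * 16 bytes that start 32 bytes into a buffer, regardless of whether they
 * live in the head, the pages, or the tail. "buf" is assumed to hold at
 * least 48 bytes.
 *
 *      struct xdr_buf subbuf;
 *
 *      if (xdr_buf_subsegment(buf, &subbuf, 32, 16))
 *              return -EINVAL;         // range out of bounds
 *      // subbuf now aliases bytes 32..47 of buf; no data was copied.
 */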
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(obj, subbuf->head[0].iov_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __read_bytes_from_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(subbuf->head[0].iov_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __write_bytes_to_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
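
/*
 * Illustrative sketch (not part of the original file): copying an opaque
 * cookie out of a buffer and patching it back in place. The 8-byte cookie
 * and its "offset" are assumptions made for the example.
 *
 *      u8 cookie[8];
 *
 *      if (read_bytes_from_xdr_buf(buf, offset, cookie, sizeof(cookie)))
 *              return -EIO;
 *      // ... modify cookie ...
 *      if (write_bytes_to_xdr_buf(buf, offset, cookie, sizeof(cookie)))
 *              return -EIO;
 */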
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
        __be32 raw;
        int status;

        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
        if (status)
                return status;
        *obj = be32_to_cpu(raw);
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
        __be32 raw = cpu_to_be32(obj);

        return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
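
/*
 * Illustrative sketch (not part of the original file): these two helpers
 * handle the byte-order conversion for a single 32-bit word at an
 * arbitrary offset, e.g. peeking at an array length before walking it.
 *
 *      u32 n;
 *
 *      if (xdr_decode_word(buf, base, &n))     // n is host-endian on return
 *              return -EINVAL;
 */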
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
        struct xdr_buf subbuf;

        if (xdr_decode_word(buf, offset, &obj->len))
                return -EFAULT;
        if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
                return -EFAULT;

        /* Is the obj contained entirely in the head? */
        obj->data = subbuf.head[0].iov_base;
        if (subbuf.head[0].iov_len == obj->len)
                return 0;
        /* ..or is the obj contained entirely in the tail? */
        obj->data = subbuf.tail[0].iov_base;
        if (subbuf.tail[0].iov_len == obj->len)
                return 0;

        /* use end of tail as storage for obj:
         * (We don't copy to the beginning because then we'd have
         * to worry about doing a potentially overlapping copy.
         * This assumes the object is at most half the length of the
         * tail.) */
        if (obj->len > buf->buflen - buf->len)
                return -ENOMEM;
        if (buf->tail[0].iov_len != 0)
                obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
        else
                obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
        __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
        return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                 struct xdr_array2_desc *desc, int encode)
{
        char *elem = NULL, *c;
        unsigned int copied = 0, todo, avail_here;
        struct page **ppages = NULL;
        int err;

        if (encode) {
                if (xdr_encode_word(buf, base, desc->array_len) != 0)
                        return -EINVAL;
        } else {
                if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
                    desc->array_len > desc->array_maxlen ||
                    (unsigned long) base + 4 + desc->array_len *
                                    desc->elem_size > buf->len)
                        return -EINVAL;
        }
        base += 4;

        if (!desc->xcode)
                return 0;

        todo = desc->array_len * desc->elem_size;

        /* process head */
        if (todo && base < buf->head->iov_len) {
                c = buf->head->iov_base + base;
                avail_here = min_t(unsigned int, todo,
                                   buf->head->iov_len - base);
                todo -= avail_here;

                while (avail_here >= desc->elem_size) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        avail_here -= desc->elem_size;
                }
                if (avail_here) {
                        if (!elem) {
                                elem = kmalloc(desc->elem_size, GFP_KERNEL);
                                err = -ENOMEM;
                                if (!elem)
                                        goto out;
                        }
                        if (encode) {
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                                memcpy(c, elem, avail_here);
                        } else
                                memcpy(elem, c, avail_here);
                        copied = avail_here;
                }
                base = buf->head->iov_len;  /* align to start of pages */
        }

        /* process pages array */
        base -= buf->head->iov_len;
        if (todo && base < buf->page_len) {
                unsigned int avail_page;

                avail_here = min(todo, buf->page_len - base);
                todo -= avail_here;

                base += buf->page_base;
                ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
                base &= ~PAGE_CACHE_MASK;
                avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
                                   avail_here);
                c = kmap(*ppages) + base;

                while (avail_here) {
                        avail_here -= avail_page;
                        if (copied || avail_page < desc->elem_size) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                                avail_page -= l;
                                c += l;
                        }
                        while (avail_page >= desc->elem_size) {
                                err = desc->xcode(desc, c);
                                if (err)
                                        goto out;
                                c += desc->elem_size;
                                avail_page -= desc->elem_size;
                        }
                        if (avail_page) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                        }
                        if (avail_here) {
                                kunmap(*ppages);
                                ppages++;
                                c = kmap(*ppages);
                        }

                        avail_page = min(avail_here,
                                         (unsigned int) PAGE_CACHE_SIZE);
                }
                base = buf->page_len;  /* align to start of tail */
        }

        /* process tail */
        base -= buf->page_len;
        if (todo) {
                c = buf->tail->iov_base + base;
                if (copied) {
                        unsigned int l = desc->elem_size - copied;

                        if (encode)
                                memcpy(c, elem + copied, l);
                        else {
                                memcpy(elem + copied, c, l);
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                        }
                        todo -= l;
                        c += l;
                }
                while (todo) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        todo -= desc->elem_size;
                }
        }
        err = 0;

out:
        kfree(elem);
        if (ppages)
                kunmap(*ppages);
        return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if (base >= buf->len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
            buf->head->iov_len + buf->page_len + buf->tail->iov_len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
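
/*
 * Illustrative sketch (not part of the original file): a caller fills in
 * a struct xdr_array2_desc with the fixed element size and a per-element
 * xcode callback. The decode_gid() callback, the 4-byte element size and
 * the maxlen of 256 are assumptions made for the example.
 *
 *      struct xdr_array2_desc desc = {
 *              .elem_size    = 4,
 *              .array_maxlen = 256,
 *              .xcode        = decode_gid,     // int (*)(struct xdr_array2_desc *, void *)
 *      };
 *
 *      if (xdr_decode_array2(buf, base, &desc))
 *              return -EINVAL;
 *      // desc.array_len now holds the decoded element count.
 */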
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                int (*actor)(struct scatterlist *, void *), void *data)
{
        int i, ret = 0;
        unsigned int page_len, thislen, page_offset;
        struct scatterlist sg[1];

        sg_init_table(sg, 1);

        if (offset >= buf->head[0].iov_len) {
                offset -= buf->head[0].iov_len;
        } else {
                thislen = buf->head[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                if (ret)
                        goto out;
                offset = 0;
                len -= thislen;
        }
        if (len == 0)
                goto out;

        if (offset >= buf->page_len) {
                offset -= buf->page_len;
        } else {
                page_len = buf->page_len - offset;
                if (page_len > len)
                        page_len = len;
                len -= page_len;
                page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
                i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
                thislen = PAGE_CACHE_SIZE - page_offset;
                do {
                        if (thislen > page_len)
                                thislen = page_len;
                        sg_set_page(sg, buf->pages[i], thislen, page_offset);
                        ret = actor(sg, data);
                        if (ret)
                                goto out;
                        page_len -= thislen;
                        i++;
                        page_offset = 0;
                        thislen = PAGE_CACHE_SIZE;
                } while (page_len != 0);
                offset = 0;
        }
        if (len == 0)
                goto out;
        if (offset < buf->tail[0].iov_len) {
                thislen = buf->tail[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                len -= thislen;
        }
        if (len != 0)
                ret = -EINVAL;
out:
        return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
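
/*
 * Illustrative sketch (not part of the original file): the actor callback
 * receives each segment of the buffer as a one-entry scatterlist; this one
 * merely sums segment lengths into a caller-supplied counter. The helper
 * name count_sg() is an assumption made for the example.
 *
 *      static int count_sg(struct scatterlist *sg, void *data)
 *      {
 *              *(unsigned int *)data += sg->length;
 *              return 0;               // non-zero would abort the walk
 *      }
 *
 *      unsigned int total = 0;
 *      err = xdr_process_buf(buf, 0, buf->len, count_sg, &total);
 */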