/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
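
/**
 * xdr_encode_netobj - Encode a variable-length netobj
 * @p: pointer to current position in XDR buffer.
 * @obj: netobj to encode
 *
 * Encodes the object length followed by the data, zeroing the bytes
 * that pad the data out to the next 32-bit boundary.
 * Returns the updated current XDR buffer position.
 */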
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
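
/**
 * xdr_decode_netobj - Decode a variable-length netobj
 * @p: pointer to current position in XDR buffer.
 * @obj: netobj to receive the decoded length and data pointer
 *
 * Decodes in place: obj->data is made to point into the XDR buffer
 * itself. Returns NULL if the encoded length exceeds XDR_MAX_NETOBJ,
 * otherwise the updated current XDR buffer position.
 */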
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
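
/**
 * xdr_encode_string - Encode a NUL-terminated string
 * @p: pointer to current position in XDR buffer.
 * @string: NUL-terminated string to encode (the terminator is not encoded)
 *
 * Returns the updated current XDR buffer position
 */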
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
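
/**
 * xdr_decode_string_inplace - Decode a string without copying it
 * @p: pointer to current position in XDR buffer.
 * @sp: on return, points at the string data inside the XDR buffer
 * @lenp: on return, holds the decoded string length
 * @maxlen: largest acceptable string length
 *
 * Returns NULL if the decoded length exceeds @maxlen, otherwise the
 * updated current XDR buffer position.
 */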
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = ntohl(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
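
/**
 * xdr_encode_pages - Attach a page list to an xdr_buf for sending
 * @xdr: xdr_buf into which the pages are inserted
 * @pages: array of pages carrying the payload
 * @base: offset of the data within the first page
 * @len: length of the page data in bytes
 *
 * The tail kvec is pointed just past the head, and holds the zero
 * padding needed to bring @len up to a 32-bit boundary.
 */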
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
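
/**
 * xdr_inline_pages - Prepare a receive buffer with an inlined page list
 * @xdr: xdr_buf being set up
 * @offset: byte offset in the head at which the page data will start
 * @pages: array of pages that will receive the data
 * @base: offset of the data within the first page
 * @len: length of the page data in bytes
 *
 * The head kvec is truncated at @offset; the remainder of the original
 * head buffer becomes the tail.
 */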
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
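
/**
 * xdr_shift_buf - Shrink the head of an xdr_buf
 * @buf: xdr_buf to modify
 * @len: number of bytes to remove from the head kvec
 *
 * The displaced data is moved into the inlined pages and/or the tail;
 * see xdr_shrink_bufhead().
 */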
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);

	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
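
/*
 * Initialize an xdr_buf consisting of a single kvec: the head is set
 * to *iov, the page list and tail are empty, and both the total and
 * the buffer length equal iov->iov_len.
 */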
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
		   unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
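
/*
 * xdr_decode_word and xdr_encode_word read or write a single 32-bit
 * word at byte offset 'base' in the xdr_buf, converting between
 * network and host byte order.
 */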
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
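
/*
 * xdr_xcode_array2 walks the head, the page list and the tail of the
 * xdr_buf in turn, calling desc->xcode() once per array element.
 * Elements that straddle a segment or page boundary are assembled in
 * the temporary 'elem' buffer, with 'copied' tracking how many bytes
 * of the current element have been transferred so far.
 */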
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
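
/*
 * xdr_process_buf invokes 'actor' on successive scatterlist entries
 * covering 'len' bytes of the buffer starting at 'offset', walking the
 * head, the page list and the tail in turn. Processing stops at the
 * first non-zero return from 'actor'; returns -EINVAL if the requested
 * range extends past the end of the buffer.
 */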
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist      sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);