xdr.c

  1. /*
  2. * linux/net/sunrpc/xdr.c
  3. *
  4. * Generic XDR support.
  5. *
  6. * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  7. */
  8. #include <linux/module.h>
  9. #include <linux/slab.h>
  10. #include <linux/types.h>
  11. #include <linux/string.h>
  12. #include <linux/kernel.h>
  13. #include <linux/pagemap.h>
  14. #include <linux/errno.h>
  15. #include <linux/sunrpc/xdr.h>
  16. #include <linux/sunrpc/msg_prot.h>
  17. /*
  18. * XDR functions for basic NFS types
  19. */
  20. __be32 *
  21. xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
  22. {
  23. unsigned int quadlen = XDR_QUADLEN(obj->len);
  24. p[quadlen] = 0; /* zero trailing bytes */
  25. *p++ = cpu_to_be32(obj->len);
  26. memcpy(p, obj->data, obj->len);
  27. return p + XDR_QUADLEN(obj->len);
  28. }
  29. EXPORT_SYMBOL_GPL(xdr_encode_netobj);
  30. __be32 *
  31. xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
  32. {
  33. unsigned int len;
  34. if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
  35. return NULL;
  36. obj->len = len;
  37. obj->data = (u8 *) p;
  38. return p + XDR_QUADLEN(len);
  39. }
  40. EXPORT_SYMBOL_GPL(xdr_decode_netobj);
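/*
 * Illustrative sketch (not part of the original file): how a caller might
 * round-trip a netobj through a small scratch array using the two helpers
 * above. Assumes a kernel context where <linux/sunrpc/xdr.h> applies; the
 * buffer sizing and example values are hypothetical.
 */
static void example_netobj_roundtrip(void)
{
	u8 payload[5] = { 1, 2, 3, 4, 5 };	/* 5 bytes -> padded to 8 on the wire */
	__be32 wire[3];				/* 4-byte length + 8 bytes of padded data */
	struct xdr_netobj in = { .len = 5, .data = payload };
	struct xdr_netobj out;
	__be32 *end;

	end = xdr_encode_netobj(wire, &in);	/* length word, data, zero padding */
	if (xdr_decode_netobj(wire, &out) != end || out.len != in.len)
		pr_warn("netobj round-trip mismatch\n");
}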
  41. /**
  42. * xdr_encode_opaque_fixed - Encode fixed length opaque data
  43. * @p: pointer to current position in XDR buffer.
  44. * @ptr: pointer to data to encode (or NULL)
  45. * @nbytes: size of data.
  46. *
  47. * Copy the array of data of length nbytes at ptr to the XDR buffer
  48. * at position p, then align to the next 32-bit boundary by padding
  49. * with zero bytes (see RFC1832).
  50. * Note: if ptr is NULL, only the padding is performed.
  51. *
  52. * Returns the updated current XDR buffer position
  53. *
  54. */
  55. __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
  56. {
  57. if (likely(nbytes != 0)) {
  58. unsigned int quadlen = XDR_QUADLEN(nbytes);
  59. unsigned int padding = (quadlen << 2) - nbytes;
  60. if (ptr != NULL)
  61. memcpy(p, ptr, nbytes);
  62. if (padding != 0)
  63. memset((char *)p + nbytes, 0, padding);
  64. p += quadlen;
  65. }
  66. return p;
  67. }
  68. EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
  69. /**
  70. * xdr_encode_opaque - Encode variable length opaque data
  71. * @p: pointer to current position in XDR buffer.
  72. * @ptr: pointer to data to encode (or NULL)
  73. * @nbytes: size of data.
  74. *
  75. * Returns the updated current XDR buffer position
  76. */
  77. __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
  78. {
  79. *p++ = cpu_to_be32(nbytes);
  80. return xdr_encode_opaque_fixed(p, ptr, nbytes);
  81. }
  82. EXPORT_SYMBOL_GPL(xdr_encode_opaque);
  83. __be32 *
  84. xdr_encode_string(__be32 *p, const char *string)
  85. {
  86. return xdr_encode_array(p, string, strlen(string));
  87. }
  88. EXPORT_SYMBOL_GPL(xdr_encode_string);
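/*
 * Illustrative sketch (not from the original file): encoding the string "abc"
 * consumes one length word plus one padded quad (3 data bytes + 1 zero pad),
 * i.e. two 32-bit words in total. The buffer here is hypothetical.
 */
static void example_encode_string(void)
{
	__be32 buf[2];
	__be32 *next = xdr_encode_string(buf, "abc");

	WARN_ON(next != buf + 2);	/* 4-byte length + 4-byte padded payload */
}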
  89. __be32 *
  90. xdr_decode_string_inplace(__be32 *p, char **sp,
  91. unsigned int *lenp, unsigned int maxlen)
  92. {
  93. u32 len;
  94. len = be32_to_cpu(*p++);
  95. if (len > maxlen)
  96. return NULL;
  97. *lenp = len;
  98. *sp = (char *) p;
  99. return p + XDR_QUADLEN(len);
  100. }
  101. EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
  102. /**
  103. * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
  104. * @buf: XDR buffer where string resides
  105. * @len: length of string, in bytes
  106. *
  107. */
  108. void
  109. xdr_terminate_string(struct xdr_buf *buf, const u32 len)
  110. {
  111. char *kaddr;
  112. kaddr = kmap_atomic(buf->pages[0]);
  113. kaddr[buf->page_base + len] = '\0';
  114. kunmap_atomic(kaddr);
  115. }
  116. EXPORT_SYMBOL_GPL(xdr_terminate_string);
  117. void
  118. xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
  119. unsigned int len)
  120. {
  121. struct kvec *tail = xdr->tail;
  122. u32 *p;
  123. xdr->pages = pages;
  124. xdr->page_base = base;
  125. xdr->page_len = len;
  126. p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
  127. tail->iov_base = p;
  128. tail->iov_len = 0;
  129. if (len & 3) {
  130. unsigned int pad = 4 - (len & 3);
  131. *p = 0;
  132. tail->iov_base = (char *)p + (len & 3);
  133. tail->iov_len = pad;
  134. len += pad;
  135. }
  136. xdr->buflen += len;
  137. xdr->len += len;
  138. }
  139. EXPORT_SYMBOL_GPL(xdr_encode_pages);
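/*
 * Illustrative sketch (not from the original file): attaching a 5-byte,
 * page-based payload to a send buffer whose head has already been set up by
 * the caller. xdr_encode_pages() points the tail at 3 bytes of zero padding
 * so the stream stays 32-bit aligned. The arguments are hypothetical.
 */
static void example_attach_pages(struct xdr_buf *sndbuf, struct page **pages)
{
	xdr_encode_pages(sndbuf, pages, 0, 5);
	/* sndbuf->tail[0] now covers 3 zero pad bytes, and sndbuf->len and
	 * sndbuf->buflen both grew by 8 (5 data + 3 pad). */
}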
  140. void
  141. xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
  142. struct page **pages, unsigned int base, unsigned int len)
  143. {
  144. struct kvec *head = xdr->head;
  145. struct kvec *tail = xdr->tail;
  146. char *buf = (char *)head->iov_base;
  147. unsigned int buflen = head->iov_len;
  148. head->iov_len = offset;
  149. xdr->pages = pages;
  150. xdr->page_base = base;
  151. xdr->page_len = len;
  152. tail->iov_base = buf + offset;
  153. tail->iov_len = buflen - offset;
  154. xdr->buflen += len;
  155. }
  156. EXPORT_SYMBOL_GPL(xdr_inline_pages);
  157. /*
  158. * Helper routines for doing 'memmove' like operations on a struct xdr_buf
  159. *
  160. * _shift_data_right_pages
  161. * @pages: vector of pages containing both the source and dest memory area.
  162. * @pgto_base: page vector address of destination
  163. * @pgfrom_base: page vector address of source
  164. * @len: number of bytes to copy
  165. *
  166. * Note: the addresses pgto_base and pgfrom_base are both calculated in
  167. * the same way:
  168. * if a memory area starts at byte 'base' in page 'pages[i]',
  169. * then its address is given as (i << PAGE_CACHE_SHIFT) + base
  170. * Also note: pgfrom_base must be < pgto_base, but the memory areas
  171. * they point to may overlap.
  172. */
  173. static void
  174. _shift_data_right_pages(struct page **pages, size_t pgto_base,
  175. size_t pgfrom_base, size_t len)
  176. {
  177. struct page **pgfrom, **pgto;
  178. char *vfrom, *vto;
  179. size_t copy;
  180. BUG_ON(pgto_base <= pgfrom_base);
  181. pgto_base += len;
  182. pgfrom_base += len;
  183. pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
  184. pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
  185. pgto_base &= ~PAGE_CACHE_MASK;
  186. pgfrom_base &= ~PAGE_CACHE_MASK;
  187. do {
  188. /* Are any pointers crossing a page boundary? */
  189. if (pgto_base == 0) {
  190. pgto_base = PAGE_CACHE_SIZE;
  191. pgto--;
  192. }
  193. if (pgfrom_base == 0) {
  194. pgfrom_base = PAGE_CACHE_SIZE;
  195. pgfrom--;
  196. }
  197. copy = len;
  198. if (copy > pgto_base)
  199. copy = pgto_base;
  200. if (copy > pgfrom_base)
  201. copy = pgfrom_base;
  202. pgto_base -= copy;
  203. pgfrom_base -= copy;
  204. vto = kmap_atomic(*pgto);
  205. vfrom = kmap_atomic(*pgfrom);
  206. memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
  207. flush_dcache_page(*pgto);
  208. kunmap_atomic(vfrom);
  209. kunmap_atomic(vto);
  210. } while ((len -= copy) != 0);
  211. }
  212. /*
  213. * _copy_to_pages
  214. * @pages: array of pages
  215. * @pgbase: page vector address of destination
  216. * @p: pointer to source data
  217. * @len: length
  218. *
  219. * Copies data from an arbitrary memory location into an array of pages
  220. * The copy is assumed to be non-overlapping.
  221. */
  222. static void
  223. _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
  224. {
  225. struct page **pgto;
  226. char *vto;
  227. size_t copy;
  228. pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
  229. pgbase &= ~PAGE_CACHE_MASK;
  230. for (;;) {
  231. copy = PAGE_CACHE_SIZE - pgbase;
  232. if (copy > len)
  233. copy = len;
  234. vto = kmap_atomic(*pgto);
  235. memcpy(vto + pgbase, p, copy);
  236. kunmap_atomic(vto);
  237. len -= copy;
  238. if (len == 0)
  239. break;
  240. pgbase += copy;
  241. if (pgbase == PAGE_CACHE_SIZE) {
  242. flush_dcache_page(*pgto);
  243. pgbase = 0;
  244. pgto++;
  245. }
  246. p += copy;
  247. }
  248. flush_dcache_page(*pgto);
  249. }
  250. /*
  251. * _copy_from_pages
  252. * @p: pointer to destination
  253. * @pages: array of pages
  254. * @pgbase: offset of source data
  255. * @len: length
  256. *
  257. * Copies data into an arbitrary memory location from an array of pages
  258. * The copy is assumed to be non-overlapping.
  259. */
  260. void
  261. _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
  262. {
  263. struct page **pgfrom;
  264. char *vfrom;
  265. size_t copy;
  266. pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
  267. pgbase &= ~PAGE_CACHE_MASK;
  268. do {
  269. copy = PAGE_CACHE_SIZE - pgbase;
  270. if (copy > len)
  271. copy = len;
  272. vfrom = kmap_atomic(*pgfrom);
  273. memcpy(p, vfrom + pgbase, copy);
  274. kunmap_atomic(vfrom);
  275. pgbase += copy;
  276. if (pgbase == PAGE_CACHE_SIZE) {
  277. pgbase = 0;
  278. pgfrom++;
  279. }
  280. p += copy;
  281. } while ((len -= copy) != 0);
  282. }
  283. EXPORT_SYMBOL_GPL(_copy_from_pages);
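/*
 * Illustrative sketch (not from the original file): the "page vector address"
 * used by these helpers is (page_index << PAGE_CACHE_SHIFT) + in-page offset,
 * so a copy that starts near the end of pages[0] continues into pages[1].
 * The offsets below are hypothetical.
 */
static void example_copy_across_pages(struct page **pages, char *dst)
{
	/* last 2 bytes of pages[0] followed by the first 4 bytes of pages[1] */
	_copy_from_pages(dst, pages, PAGE_CACHE_SIZE - 2, 6);
}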
  284. /*
  285. * xdr_shrink_bufhead
  286. * @buf: xdr_buf
  287. * @len: bytes to remove from buf->head[0]
  288. *
  289. * Shrinks XDR buffer's header kvec buf->head[0] by
  290. * 'len' bytes. The extra data is not lost, but is instead
  291. * moved into the inlined pages and/or the tail.
  292. */
  293. static void
  294. xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
  295. {
  296. struct kvec *head, *tail;
  297. size_t copy, offs;
  298. unsigned int pglen = buf->page_len;
  299. tail = buf->tail;
  300. head = buf->head;
  301. BUG_ON (len > head->iov_len);
  302. /* Shift the tail first */
  303. if (tail->iov_len != 0) {
  304. if (tail->iov_len > len) {
  305. copy = tail->iov_len - len;
  306. memmove((char *)tail->iov_base + len,
  307. tail->iov_base, copy);
  308. }
  309. /* Copy from the inlined pages into the tail */
  310. copy = len;
  311. if (copy > pglen)
  312. copy = pglen;
  313. offs = len - copy;
  314. if (offs >= tail->iov_len)
  315. copy = 0;
  316. else if (copy > tail->iov_len - offs)
  317. copy = tail->iov_len - offs;
  318. if (copy != 0)
  319. _copy_from_pages((char *)tail->iov_base + offs,
  320. buf->pages,
  321. buf->page_base + pglen + offs - len,
  322. copy);
  323. /* Do we also need to copy data from the head into the tail ? */
  324. if (len > pglen) {
  325. offs = copy = len - pglen;
  326. if (copy > tail->iov_len)
  327. copy = tail->iov_len;
  328. memcpy(tail->iov_base,
  329. (char *)head->iov_base +
  330. head->iov_len - offs,
  331. copy);
  332. }
  333. }
  334. /* Now handle pages */
  335. if (pglen != 0) {
  336. if (pglen > len)
  337. _shift_data_right_pages(buf->pages,
  338. buf->page_base + len,
  339. buf->page_base,
  340. pglen - len);
  341. copy = len;
  342. if (len > pglen)
  343. copy = pglen;
  344. _copy_to_pages(buf->pages, buf->page_base,
  345. (char *)head->iov_base + head->iov_len - len,
  346. copy);
  347. }
  348. head->iov_len -= len;
  349. buf->buflen -= len;
  350. /* Have we truncated the message? */
  351. if (buf->len > buf->buflen)
  352. buf->len = buf->buflen;
  353. }
  354. /*
  355. * xdr_shrink_pagelen
  356. * @buf: xdr_buf
  357. * @len: bytes to remove from buf->pages
  358. *
  359. * Shrinks XDR buffer's page array buf->pages by
  360. * 'len' bytes. The extra data is not lost, but is instead
  361. * moved into the tail.
  362. */
  363. static void
  364. xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
  365. {
  366. struct kvec *tail;
  367. size_t copy;
  368. unsigned int pglen = buf->page_len;
  369. unsigned int tailbuf_len;
  370. tail = buf->tail;
  371. BUG_ON (len > pglen);
  372. tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
  373. /* Shift the tail first */
  374. if (tailbuf_len != 0) {
  375. unsigned int free_space = tailbuf_len - tail->iov_len;
  376. if (len < free_space)
  377. free_space = len;
  378. tail->iov_len += free_space;
  379. copy = len;
  380. if (tail->iov_len > len) {
  381. char *p = (char *)tail->iov_base + len;
  382. memmove(p, tail->iov_base, tail->iov_len - len);
  383. } else
  384. copy = tail->iov_len;
  385. /* Copy from the inlined pages into the tail */
  386. _copy_from_pages((char *)tail->iov_base,
  387. buf->pages, buf->page_base + pglen - len,
  388. copy);
  389. }
  390. buf->page_len -= len;
  391. buf->buflen -= len;
  392. /* Have we truncated the message? */
  393. if (buf->len > buf->buflen)
  394. buf->len = buf->buflen;
  395. }
  396. void
  397. xdr_shift_buf(struct xdr_buf *buf, size_t len)
  398. {
  399. xdr_shrink_bufhead(buf, len);
  400. }
  401. EXPORT_SYMBOL_GPL(xdr_shift_buf);
  402. /**
  403. * xdr_init_encode - Initialize a struct xdr_stream for sending data.
  404. * @xdr: pointer to xdr_stream struct
  405. * @buf: pointer to XDR buffer in which to encode data
  406. * @p: current pointer inside XDR buffer
  407. *
  408. * Note: at the moment the RPC client only passes the length of our
  409. * scratch buffer in the xdr_buf's header kvec. Previously this
  410. * meant we needed to call xdr_adjust_iovec() after encoding the
  411. * data. With the new scheme, the xdr_stream manages the details
  412. * of the buffer length, and takes care of adjusting the kvec
  413. * length for us.
  414. */
  415. void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
  416. {
  417. struct kvec *iov = buf->head;
  418. int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
  419. BUG_ON(scratch_len < 0);
  420. xdr->buf = buf;
  421. xdr->iov = iov;
  422. xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
  423. xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
  424. BUG_ON(iov->iov_len > scratch_len);
  425. if (p != xdr->p && p != NULL) {
  426. size_t len;
  427. BUG_ON(p < xdr->p || p > xdr->end);
  428. len = (char *)p - (char *)xdr->p;
  429. xdr->p = p;
  430. buf->len += len;
  431. iov->iov_len += len;
  432. }
  433. }
  434. EXPORT_SYMBOL_GPL(xdr_init_encode);
  435. /**
  436. * xdr_reserve_space - Reserve buffer space for sending
  437. * @xdr: pointer to xdr_stream
  438. * @nbytes: number of bytes to reserve
  439. *
  440. * Checks that we have enough buffer space to encode 'nbytes' more
  441. * bytes of data. If so, update the total xdr_buf length, and
  442. * adjust the length of the current kvec.
  443. */
  444. __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
  445. {
  446. __be32 *p = xdr->p;
  447. __be32 *q;
  448. /* align nbytes on the next 32-bit boundary */
  449. nbytes += 3;
  450. nbytes &= ~3;
  451. q = p + (nbytes >> 2);
  452. if (unlikely(q > xdr->end || q < p))
  453. return NULL;
  454. xdr->p = q;
  455. xdr->iov->iov_len += nbytes;
  456. xdr->buf->len += nbytes;
  457. return p;
  458. }
  459. EXPORT_SYMBOL_GPL(xdr_reserve_space);
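/*
 * Illustrative sketch (not from the original file) of the usual encode
 * pattern: initialize the stream over an xdr_buf that the RPC layer has
 * already sized, reserve space, then write words. The values are
 * hypothetical.
 */
static int example_encode_two_words(struct xdr_stream *xdr, struct xdr_buf *buf)
{
	__be32 *p;

	xdr_init_encode(xdr, buf, NULL);
	p = xdr_reserve_space(xdr, 2 * sizeof(__be32));
	if (p == NULL)
		return -EMSGSIZE;	/* no room for 8 more bytes */
	*p++ = cpu_to_be32(42);
	*p = cpu_to_be32(7);
	return 0;
}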
  460. /**
  461. * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
  462. * @xdr: pointer to xdr_stream
  463. * @pages: list of pages
  464. * @base: offset of first byte
  465. * @len: length of data in bytes
  466. *
  467. */
  468. void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
  469. unsigned int len)
  470. {
  471. struct xdr_buf *buf = xdr->buf;
  472. struct kvec *iov = buf->tail;
  473. buf->pages = pages;
  474. buf->page_base = base;
  475. buf->page_len = len;
  476. iov->iov_base = (char *)xdr->p;
  477. iov->iov_len = 0;
  478. xdr->iov = iov;
  479. if (len & 3) {
  480. unsigned int pad = 4 - (len & 3);
  481. BUG_ON(xdr->p >= xdr->end);
  482. iov->iov_base = (char *)xdr->p + (len & 3);
  483. iov->iov_len += pad;
  484. len += pad;
  485. *xdr->p++ = 0;
  486. }
  487. buf->buflen += len;
  488. buf->len += len;
  489. }
  490. EXPORT_SYMBOL_GPL(xdr_write_pages);
  491. static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
  492. __be32 *p, unsigned int len)
  493. {
  494. if (len > iov->iov_len)
  495. len = iov->iov_len;
  496. if (p == NULL)
  497. p = (__be32*)iov->iov_base;
  498. xdr->p = p;
  499. xdr->end = (__be32*)(iov->iov_base + len);
  500. xdr->iov = iov;
  501. xdr->page_ptr = NULL;
  502. }
  503. static int xdr_set_page_base(struct xdr_stream *xdr,
  504. unsigned int base, unsigned int len)
  505. {
  506. unsigned int pgnr;
  507. unsigned int maxlen;
  508. unsigned int pgoff;
  509. unsigned int pgend;
  510. void *kaddr;
  511. maxlen = xdr->buf->page_len;
  512. if (base >= maxlen)
  513. return -EINVAL;
  514. maxlen -= base;
  515. if (len > maxlen)
  516. len = maxlen;
  517. base += xdr->buf->page_base;
  518. pgnr = base >> PAGE_SHIFT;
  519. xdr->page_ptr = &xdr->buf->pages[pgnr];
  520. kaddr = page_address(*xdr->page_ptr);
  521. pgoff = base & ~PAGE_MASK;
  522. xdr->p = (__be32*)(kaddr + pgoff);
  523. pgend = pgoff + len;
  524. if (pgend > PAGE_SIZE)
  525. pgend = PAGE_SIZE;
  526. xdr->end = (__be32*)(kaddr + pgend);
  527. xdr->iov = NULL;
  528. return 0;
  529. }
  530. static void xdr_set_next_page(struct xdr_stream *xdr)
  531. {
  532. unsigned int newbase;
  533. newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
  534. newbase -= xdr->buf->page_base;
  535. if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
  536. xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
  537. }
  538. static bool xdr_set_next_buffer(struct xdr_stream *xdr)
  539. {
  540. if (xdr->page_ptr != NULL)
  541. xdr_set_next_page(xdr);
  542. else if (xdr->iov == xdr->buf->head) {
  543. if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
  544. xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
  545. }
  546. return xdr->p != xdr->end;
  547. }
  548. /**
  549. * xdr_init_decode - Initialize an xdr_stream for decoding data.
  550. * @xdr: pointer to xdr_stream struct
  551. * @buf: pointer to XDR buffer from which to decode data
  552. * @p: current pointer inside XDR buffer
  553. */
  554. void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
  555. {
  556. xdr->buf = buf;
  557. xdr->scratch.iov_base = NULL;
  558. xdr->scratch.iov_len = 0;
  559. if (buf->head[0].iov_len != 0)
  560. xdr_set_iov(xdr, buf->head, p, buf->len);
  561. else if (buf->page_len != 0)
  562. xdr_set_page_base(xdr, 0, buf->len);
  563. }
  564. EXPORT_SYMBOL_GPL(xdr_init_decode);
  565. /**
  566. * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
  567. * @xdr: pointer to xdr_stream struct
  568. * @buf: pointer to XDR buffer from which to decode data
  569. * @pages: list of pages to decode into
  570. * @len: length in bytes of buffer in pages
  571. */
  572. void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
  573. struct page **pages, unsigned int len)
  574. {
  575. memset(buf, 0, sizeof(*buf));
  576. buf->pages = pages;
  577. buf->page_len = len;
  578. buf->buflen = len;
  579. buf->len = len;
  580. xdr_init_decode(xdr, buf, NULL);
  581. }
  582. EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
  583. static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
  584. {
  585. __be32 *p = xdr->p;
  586. __be32 *q = p + XDR_QUADLEN(nbytes);
  587. if (unlikely(q > xdr->end || q < p))
  588. return NULL;
  589. xdr->p = q;
  590. return p;
  591. }
  592. /**
  593. * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
  594. * @xdr: pointer to xdr_stream struct
  595. * @buf: pointer to an empty buffer
  596. * @buflen: size of 'buf'
  597. *
  598. * The scratch buffer is used when decoding from an array of pages.
  599. * If an xdr_inline_decode() call spans across page boundaries, then
  600. * we copy the data into the scratch buffer in order to allow linear
  601. * access.
  602. */
  603. void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
  604. {
  605. xdr->scratch.iov_base = buf;
  606. xdr->scratch.iov_len = buflen;
  607. }
  608. EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
  609. static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
  610. {
  611. __be32 *p;
  612. void *cpdest = xdr->scratch.iov_base;
  613. size_t cplen = (char *)xdr->end - (char *)xdr->p;
  614. if (nbytes > xdr->scratch.iov_len)
  615. return NULL;
  616. memcpy(cpdest, xdr->p, cplen);
  617. cpdest += cplen;
  618. nbytes -= cplen;
  619. if (!xdr_set_next_buffer(xdr))
  620. return NULL;
  621. p = __xdr_inline_decode(xdr, nbytes);
  622. if (p == NULL)
  623. return NULL;
  624. memcpy(cpdest, p, nbytes);
  625. return xdr->scratch.iov_base;
  626. }
  627. /**
  628. * xdr_inline_decode - Retrieve XDR data to decode
  629. * @xdr: pointer to xdr_stream struct
  630. * @nbytes: number of bytes of data to decode
  631. *
  632. * Check if the input buffer is long enough to enable us to decode
  633. * 'nbytes' more bytes of data starting at the current position.
  634. * If so return the current pointer, then update the current
  635. * pointer position.
  636. */
  637. __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
  638. {
  639. __be32 *p;
  640. if (nbytes == 0)
  641. return xdr->p;
  642. if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
  643. return NULL;
  644. p = __xdr_inline_decode(xdr, nbytes);
  645. if (p != NULL)
  646. return p;
  647. return xdr_copy_to_scratch(xdr, nbytes);
  648. }
  649. EXPORT_SYMBOL_GPL(xdr_inline_decode);
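/*
 * Illustrative sketch (not from the original file) of the usual decode
 * pattern: initialize the stream, attach a scratch buffer so reads may cross
 * a page boundary, then pull words out. The reply buffer is assumed to have
 * been filled in by the transport; names and sizes are hypothetical.
 */
static int example_decode_two_words(struct xdr_stream *xdr,
				    struct xdr_buf *rcvbuf,
				    u32 *out1, u32 *out2)
{
	__be32 scratch[2];
	__be32 *p;

	xdr_init_decode(xdr, rcvbuf, NULL);
	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
	p = xdr_inline_decode(xdr, 2 * sizeof(__be32));
	if (p == NULL)
		return -EIO;		/* message shorter than expected */
	*out1 = be32_to_cpup(p++);
	*out2 = be32_to_cpup(p);
	return 0;
}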
  650. /**
  651. * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
  652. * @xdr: pointer to xdr_stream struct
  653. * @len: number of bytes of page data
  654. *
  655. * Moves data beyond the current pointer position from the XDR head[] buffer
  656. * into the page list. Any data that lies beyond current position + "len"
  657. * bytes is moved into the XDR tail[].
  658. */
  659. void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
  660. {
  661. struct xdr_buf *buf = xdr->buf;
  662. struct kvec *iov;
  663. ssize_t shift;
  664. unsigned int end;
  665. int padding;
  666. /* Realign pages to current pointer position */
  667. iov = buf->head;
  668. shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
  669. if (shift > 0)
  670. xdr_shrink_bufhead(buf, shift);
  671. /* Truncate page data and move it into the tail */
  672. if (buf->page_len > len)
  673. xdr_shrink_pagelen(buf, buf->page_len - len);
  674. padding = (XDR_QUADLEN(len) << 2) - len;
  675. xdr->iov = iov = buf->tail;
  676. /* Compute remaining message length. */
  677. end = iov->iov_len;
  678. shift = buf->buflen - buf->len;
  679. if (shift < end)
  680. end -= shift;
  681. else if (shift > 0)
  682. end = 0;
  683. /*
  684. * Position current pointer at beginning of tail, and
  685. * set remaining message length.
  686. */
  687. xdr->p = (__be32 *)((char *)iov->iov_base + padding);
  688. xdr->end = (__be32 *)((char *)iov->iov_base + end);
  689. xdr->page_ptr = NULL;
  690. }
  691. EXPORT_SYMBOL_GPL(xdr_read_pages);
  692. /**
  693. * xdr_enter_page - decode data from the XDR page
  694. * @xdr: pointer to xdr_stream struct
  695. * @len: number of bytes of page data
  696. *
  697. * Moves data beyond the current pointer position from the XDR head[] buffer
  698. * into the page list. Any data that lies beyond current position + "len"
  699. * bytes is moved into the XDR tail[]. The current pointer is then
  700. * repositioned at the beginning of the first XDR page.
  701. */
  702. void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
  703. {
  704. xdr_read_pages(xdr, len);
  705. /*
  706. * Position current pointer at beginning of tail, and
  707. * set remaining message length.
  708. */
  709. xdr_set_page_base(xdr, 0, len);
  710. }
  711. EXPORT_SYMBOL_GPL(xdr_enter_page);
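/*
 * Illustrative sketch (not from the original file): a READ-style reply where
 * a byte count sits in the head and the payload itself lives in the page
 * list. xdr_read_pages() realigns the buffer around that payload;
 * xdr_enter_page() would instead leave the stream positioned over the first
 * page of it. The layout is hypothetical.
 */
static int example_decode_read_reply(struct xdr_stream *xdr, u32 *count)
{
	__be32 *p = xdr_inline_decode(xdr, sizeof(__be32));

	if (p == NULL)
		return -EIO;
	*count = be32_to_cpup(p);	/* length of the page-based payload */
	xdr_read_pages(xdr, *count);	/* head remainder -> pages, excess -> tail */
	return 0;
}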
  712. static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
  713. void
  714. xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
  715. {
  716. buf->head[0] = *iov;
  717. buf->tail[0] = empty_iov;
  718. buf->page_len = 0;
  719. buf->buflen = buf->len = iov->iov_len;
  720. }
  721. EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
  722. /* Sets subbuf to the portion of buf of length len beginning base bytes
  723. * from the start of buf. Returns -1 if base or length are out of bounds. */
  724. int
  725. xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
  726. unsigned int base, unsigned int len)
  727. {
  728. subbuf->buflen = subbuf->len = len;
  729. if (base < buf->head[0].iov_len) {
  730. subbuf->head[0].iov_base = buf->head[0].iov_base + base;
  731. subbuf->head[0].iov_len = min_t(unsigned int, len,
  732. buf->head[0].iov_len - base);
  733. len -= subbuf->head[0].iov_len;
  734. base = 0;
  735. } else {
  736. subbuf->head[0].iov_base = NULL;
  737. subbuf->head[0].iov_len = 0;
  738. base -= buf->head[0].iov_len;
  739. }
  740. if (base < buf->page_len) {
  741. subbuf->page_len = min(buf->page_len - base, len);
  742. base += buf->page_base;
  743. subbuf->page_base = base & ~PAGE_CACHE_MASK;
  744. subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
  745. len -= subbuf->page_len;
  746. base = 0;
  747. } else {
  748. base -= buf->page_len;
  749. subbuf->page_len = 0;
  750. }
  751. if (base < buf->tail[0].iov_len) {
  752. subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
  753. subbuf->tail[0].iov_len = min_t(unsigned int, len,
  754. buf->tail[0].iov_len - base);
  755. len -= subbuf->tail[0].iov_len;
  756. base = 0;
  757. } else {
  758. subbuf->tail[0].iov_base = NULL;
  759. subbuf->tail[0].iov_len = 0;
  760. base -= buf->tail[0].iov_len;
  761. }
  762. if (base || len)
  763. return -1;
  764. return 0;
  765. }
  766. EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
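/*
 * Illustrative sketch (not from the original file): carve bytes
 * [base, base + len) out of a reply as a sub-buffer, e.g. to hand an opaque
 * blob to a helper that expects its own xdr_buf. The offsets are
 * hypothetical.
 */
static int example_subsegment(struct xdr_buf *buf)
{
	struct xdr_buf sub;

	if (xdr_buf_subsegment(buf, &sub, 20, 16))
		return -EINVAL;		/* range falls outside 'buf' */
	/* 'sub' now aliases buf's head/pages/tail; nothing was copied. */
	return 0;
}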
  767. static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
  768. {
  769. unsigned int this_len;
  770. this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
  771. memcpy(obj, subbuf->head[0].iov_base, this_len);
  772. len -= this_len;
  773. obj += this_len;
  774. this_len = min_t(unsigned int, len, subbuf->page_len);
  775. if (this_len)
  776. _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
  777. len -= this_len;
  778. obj += this_len;
  779. this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
  780. memcpy(obj, subbuf->tail[0].iov_base, this_len);
  781. }
  782. /* obj is assumed to point to allocated memory of size at least len: */
  783. int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
  784. {
  785. struct xdr_buf subbuf;
  786. int status;
  787. status = xdr_buf_subsegment(buf, &subbuf, base, len);
  788. if (status != 0)
  789. return status;
  790. __read_bytes_from_xdr_buf(&subbuf, obj, len);
  791. return 0;
  792. }
  793. EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
  794. static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
  795. {
  796. unsigned int this_len;
  797. this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
  798. memcpy(subbuf->head[0].iov_base, obj, this_len);
  799. len -= this_len;
  800. obj += this_len;
  801. this_len = min_t(unsigned int, len, subbuf->page_len);
  802. if (this_len)
  803. _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
  804. len -= this_len;
  805. obj += this_len;
  806. this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
  807. memcpy(subbuf->tail[0].iov_base, obj, this_len);
  808. }
  809. /* obj is assumed to point to allocated memory of size at least len: */
  810. int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
  811. {
  812. struct xdr_buf subbuf;
  813. int status;
  814. status = xdr_buf_subsegment(buf, &subbuf, base, len);
  815. if (status != 0)
  816. return status;
  817. __write_bytes_to_xdr_buf(&subbuf, obj, len);
  818. return 0;
  819. }
  820. EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
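/*
 * Illustrative sketch (not from the original file): the read/write helpers
 * copy through a temporary subsegment, so a caller can treat the xdr_buf as
 * flat storage even when the range spans head, pages and tail. The offset
 * and cookie size below are hypothetical.
 */
static int example_patch_bytes(struct xdr_buf *buf)
{
	u8 cookie[8];
	int err;

	err = read_bytes_from_xdr_buf(buf, 32, cookie, sizeof(cookie));
	if (err)
		return err;
	return write_bytes_to_xdr_buf(buf, 32, cookie, sizeof(cookie));
}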
  821. int
  822. xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
  823. {
  824. __be32 raw;
  825. int status;
  826. status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
  827. if (status)
  828. return status;
  829. *obj = be32_to_cpu(raw);
  830. return 0;
  831. }
  832. EXPORT_SYMBOL_GPL(xdr_decode_word);
  833. int
  834. xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
  835. {
  836. __be32 raw = cpu_to_be32(obj);
  837. return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
  838. }
  839. EXPORT_SYMBOL_GPL(xdr_encode_word);
  840. /* If the netobj starting offset bytes from the start of xdr_buf is contained
  841. * entirely in the head or the tail, set obj to point to it; otherwise
  842. * try to find space for it at the end of the tail, copy it there, and
  843. * set obj to point to it. */
  844. int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
  845. {
  846. struct xdr_buf subbuf;
  847. if (xdr_decode_word(buf, offset, &obj->len))
  848. return -EFAULT;
  849. if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
  850. return -EFAULT;
  851. /* Is the obj contained entirely in the head? */
  852. obj->data = subbuf.head[0].iov_base;
  853. if (subbuf.head[0].iov_len == obj->len)
  854. return 0;
  855. /* ..or is the obj contained entirely in the tail? */
  856. obj->data = subbuf.tail[0].iov_base;
  857. if (subbuf.tail[0].iov_len == obj->len)
  858. return 0;
  859. /* use end of tail as storage for obj:
  860. * (We don't copy to the beginning because then we'd have
  861. * to worry about doing a potentially overlapping copy.
  862. * This assumes the object is at most half the length of the
  863. * tail.) */
  864. if (obj->len > buf->buflen - buf->len)
  865. return -ENOMEM;
  866. if (buf->tail[0].iov_len != 0)
  867. obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
  868. else
  869. obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
  870. __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
  871. return 0;
  872. }
  873. EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
  874. /* Returns 0 on success, or else a negative error code. */
  875. static int
  876. xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
  877. struct xdr_array2_desc *desc, int encode)
  878. {
  879. char *elem = NULL, *c;
  880. unsigned int copied = 0, todo, avail_here;
  881. struct page **ppages = NULL;
  882. int err;
  883. if (encode) {
  884. if (xdr_encode_word(buf, base, desc->array_len) != 0)
  885. return -EINVAL;
  886. } else {
  887. if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
  888. desc->array_len > desc->array_maxlen ||
  889. (unsigned long) base + 4 + desc->array_len *
  890. desc->elem_size > buf->len)
  891. return -EINVAL;
  892. }
  893. base += 4;
  894. if (!desc->xcode)
  895. return 0;
  896. todo = desc->array_len * desc->elem_size;
  897. /* process head */
  898. if (todo && base < buf->head->iov_len) {
  899. c = buf->head->iov_base + base;
  900. avail_here = min_t(unsigned int, todo,
  901. buf->head->iov_len - base);
  902. todo -= avail_here;
  903. while (avail_here >= desc->elem_size) {
  904. err = desc->xcode(desc, c);
  905. if (err)
  906. goto out;
  907. c += desc->elem_size;
  908. avail_here -= desc->elem_size;
  909. }
  910. if (avail_here) {
  911. if (!elem) {
  912. elem = kmalloc(desc->elem_size, GFP_KERNEL);
  913. err = -ENOMEM;
  914. if (!elem)
  915. goto out;
  916. }
  917. if (encode) {
  918. err = desc->xcode(desc, elem);
  919. if (err)
  920. goto out;
  921. memcpy(c, elem, avail_here);
  922. } else
  923. memcpy(elem, c, avail_here);
  924. copied = avail_here;
  925. }
  926. base = buf->head->iov_len; /* align to start of pages */
  927. }
  928. /* process pages array */
  929. base -= buf->head->iov_len;
  930. if (todo && base < buf->page_len) {
  931. unsigned int avail_page;
  932. avail_here = min(todo, buf->page_len - base);
  933. todo -= avail_here;
  934. base += buf->page_base;
  935. ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
  936. base &= ~PAGE_CACHE_MASK;
  937. avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
  938. avail_here);
  939. c = kmap(*ppages) + base;
  940. while (avail_here) {
  941. avail_here -= avail_page;
  942. if (copied || avail_page < desc->elem_size) {
  943. unsigned int l = min(avail_page,
  944. desc->elem_size - copied);
  945. if (!elem) {
  946. elem = kmalloc(desc->elem_size,
  947. GFP_KERNEL);
  948. err = -ENOMEM;
  949. if (!elem)
  950. goto out;
  951. }
  952. if (encode) {
  953. if (!copied) {
  954. err = desc->xcode(desc, elem);
  955. if (err)
  956. goto out;
  957. }
  958. memcpy(c, elem + copied, l);
  959. copied += l;
  960. if (copied == desc->elem_size)
  961. copied = 0;
  962. } else {
  963. memcpy(elem + copied, c, l);
  964. copied += l;
  965. if (copied == desc->elem_size) {
  966. err = desc->xcode(desc, elem);
  967. if (err)
  968. goto out;
  969. copied = 0;
  970. }
  971. }
  972. avail_page -= l;
  973. c += l;
  974. }
  975. while (avail_page >= desc->elem_size) {
  976. err = desc->xcode(desc, c);
  977. if (err)
  978. goto out;
  979. c += desc->elem_size;
  980. avail_page -= desc->elem_size;
  981. }
  982. if (avail_page) {
  983. unsigned int l = min(avail_page,
  984. desc->elem_size - copied);
  985. if (!elem) {
  986. elem = kmalloc(desc->elem_size,
  987. GFP_KERNEL);
  988. err = -ENOMEM;
  989. if (!elem)
  990. goto out;
  991. }
  992. if (encode) {
  993. if (!copied) {
  994. err = desc->xcode(desc, elem);
  995. if (err)
  996. goto out;
  997. }
  998. memcpy(c, elem + copied, l);
  999. copied += l;
  1000. if (copied == desc->elem_size)
  1001. copied = 0;
  1002. } else {
  1003. memcpy(elem + copied, c, l);
  1004. copied += l;
  1005. if (copied == desc->elem_size) {
  1006. err = desc->xcode(desc, elem);
  1007. if (err)
  1008. goto out;
  1009. copied = 0;
  1010. }
  1011. }
  1012. }
  1013. if (avail_here) {
  1014. kunmap(*ppages);
  1015. ppages++;
  1016. c = kmap(*ppages);
  1017. }
  1018. avail_page = min(avail_here,
  1019. (unsigned int) PAGE_CACHE_SIZE);
  1020. }
  1021. base = buf->page_len; /* align to start of tail */
  1022. }
  1023. /* process tail */
  1024. base -= buf->page_len;
  1025. if (todo) {
  1026. c = buf->tail->iov_base + base;
  1027. if (copied) {
  1028. unsigned int l = desc->elem_size - copied;
  1029. if (encode)
  1030. memcpy(c, elem + copied, l);
  1031. else {
  1032. memcpy(elem + copied, c, l);
  1033. err = desc->xcode(desc, elem);
  1034. if (err)
  1035. goto out;
  1036. }
  1037. todo -= l;
  1038. c += l;
  1039. }
  1040. while (todo) {
  1041. err = desc->xcode(desc, c);
  1042. if (err)
  1043. goto out;
  1044. c += desc->elem_size;
  1045. todo -= desc->elem_size;
  1046. }
  1047. }
  1048. err = 0;
  1049. out:
  1050. kfree(elem);
  1051. if (ppages)
  1052. kunmap(*ppages);
  1053. return err;
  1054. }
  1055. int
  1056. xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
  1057. struct xdr_array2_desc *desc)
  1058. {
  1059. if (base >= buf->len)
  1060. return -EINVAL;
  1061. return xdr_xcode_array2(buf, base, desc, 0);
  1062. }
  1063. EXPORT_SYMBOL_GPL(xdr_decode_array2);
  1064. int
  1065. xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
  1066. struct xdr_array2_desc *desc)
  1067. {
  1068. if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
  1069. buf->head->iov_len + buf->page_len + buf->tail->iov_len)
  1070. return -EINVAL;
  1071. return xdr_xcode_array2(buf, base, desc, 1);
  1072. }
  1073. EXPORT_SYMBOL_GPL(xdr_encode_array2);
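/*
 * Illustrative sketch (not from the original file): decoding an XDR array of
 * fixed-size elements with xdr_decode_array2(). The element handler, sizes
 * and limits below are hypothetical.
 */
static int example_decode_elem(struct xdr_array2_desc *desc, void *elem)
{
	u32 value = be32_to_cpup(elem);

	return value != 0 ? 0 : -EINVAL;	/* reject zero entries, say */
}

static int example_decode_u32_array(struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size	= sizeof(__be32),
		.array_maxlen	= 64,
		.xcode		= example_decode_elem,
	};

	return xdr_decode_array2(buf, base, &desc);
}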
  1074. int
  1075. xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
  1076. int (*actor)(struct scatterlist *, void *), void *data)
  1077. {
  1078. int i, ret = 0;
  1079. unsigned int page_len, thislen, page_offset;
  1080. struct scatterlist sg[1];
  1081. sg_init_table(sg, 1);
  1082. if (offset >= buf->head[0].iov_len) {
  1083. offset -= buf->head[0].iov_len;
  1084. } else {
  1085. thislen = buf->head[0].iov_len - offset;
  1086. if (thislen > len)
  1087. thislen = len;
  1088. sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
  1089. ret = actor(sg, data);
  1090. if (ret)
  1091. goto out;
  1092. offset = 0;
  1093. len -= thislen;
  1094. }
  1095. if (len == 0)
  1096. goto out;
  1097. if (offset >= buf->page_len) {
  1098. offset -= buf->page_len;
  1099. } else {
  1100. page_len = buf->page_len - offset;
  1101. if (page_len > len)
  1102. page_len = len;
  1103. len -= page_len;
  1104. page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
  1105. i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
  1106. thislen = PAGE_CACHE_SIZE - page_offset;
  1107. do {
  1108. if (thislen > page_len)
  1109. thislen = page_len;
  1110. sg_set_page(sg, buf->pages[i], thislen, page_offset);
  1111. ret = actor(sg, data);
  1112. if (ret)
  1113. goto out;
  1114. page_len -= thislen;
  1115. i++;
  1116. page_offset = 0;
  1117. thislen = PAGE_CACHE_SIZE;
  1118. } while (page_len != 0);
  1119. offset = 0;
  1120. }
  1121. if (len == 0)
  1122. goto out;
  1123. if (offset < buf->tail[0].iov_len) {
  1124. thislen = buf->tail[0].iov_len - offset;
  1125. if (thislen > len)
  1126. thislen = len;
  1127. sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
  1128. ret = actor(sg, data);
  1129. len -= thislen;
  1130. }
  1131. if (len != 0)
  1132. ret = -EINVAL;
  1133. out:
  1134. return ret;
  1135. }
  1136. EXPORT_SYMBOL_GPL(xdr_process_buf);
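/*
 * Illustrative sketch (not from the original file): xdr_process_buf() walks a
 * byte range of the buffer as scatterlist entries and hands each one to the
 * caller's actor; checksumming code typically lives there. The actor below
 * merely counts bytes and is hypothetical.
 */
static int example_count_sg(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;
}

static int example_walk_range(struct xdr_buf *buf, unsigned int offset,
			      unsigned int len)
{
	unsigned int total = 0;
	int err;

	err = xdr_process_buf(buf, offset, len, example_count_sg, &total);
	if (err)
		return err;
	return total == len ? 0 : -EINVAL;
}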