/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
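
/*
 * Illustrative sketch (not part of the original file): encoding a
 * variable-length opaque such as a small file-handle-like blob.  The
 * buffer position and handle bytes below are hypothetical; the point is
 * that xdr_encode_opaque() emits the 4-byte length, copies the data,
 * then zero-pads to the next 32-bit boundary, so 6 bytes of data
 * consume 4 + 8 = 12 bytes of XDR stream.
 */
#if 0 /* example only, kept out of the build */
static u32 *example_encode_blob(u32 *p)
{
	static const u8 blob[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };

	/* writes htonl(6), the 6 data bytes, then 2 zero pad bytes */
	return xdr_encode_opaque(p, blob, sizeof(blob));
}
#endif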
u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int len;
	char *string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
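
/*
 * Illustrative sketch (not part of the original file): attaching a page
 * vector to an xdr_buf whose head already holds the encoded header.
 * The function name and the 5-byte payload are hypothetical; with an
 * unaligned length, xdr_encode_pages() points the tail just past the
 * page data and gives it a 3-byte zero pad so the stream stays 32-bit
 * aligned.
 */
#if 0 /* example only, kept out of the build */
static void example_attach_pages(struct xdr_buf *buf, struct page **pages)
{
	/* page data starts at offset 0 and is 5 bytes long */
	xdr_encode_pages(buf, pages, 0, 5);
	/* now: buf->page_len == 5, buf->tail[0].iov_len == 3 */
}
#endif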
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	ssize_t copied = 0;
	int ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
		struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct kvec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_name = addr,
			.msg_namelen = addrlen,
			.msg_flags = msgflags,
		};
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;

		if (iov.iov_len != 0)
			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		else
			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct kvec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len = len - base,
		};
		struct msghdr msg = {
			.msg_flags = msgflags,
		};
		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
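
/*
 * Illustrative sketch (not part of the original file): how a flat page
 * vector address, as used by the helpers above and below, splits into
 * a page index and an in-page offset.  The numbers in the comments
 * assume a 4096-byte PAGE_CACHE_SIZE; the function name is
 * hypothetical.
 */
#if 0 /* example only, kept out of the build */
static void example_split_address(size_t addr)
{
	size_t page_index = addr >> PAGE_CACHE_SHIFT;	/* addr 5000 -> page 1 */
	size_t page_offset = addr & ~PAGE_CACHE_MASK;	/* addr 5000 -> offset 904 */

	(void)page_index;
	(void)page_offset;
}
#endif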
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
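
/*
 * Illustrative sketch (not part of the original file): the typical
 * encode-side pattern built on xdr_init_encode() and
 * xdr_reserve_space().  The function name and the two words being
 * encoded are hypothetical; a real caller decides how to handle the
 * NULL return from xdr_reserve_space() on overflow.
 */
#if 0 /* example only, kept out of the build */
static int example_encode(struct xdr_buf *buf, uint32_t *start)
{
	struct xdr_stream xdr;
	uint32_t *p;

	xdr_init_encode(&xdr, buf, start);
	p = xdr_reserve_space(&xdr, 2 * sizeof(uint32_t));
	if (p == NULL)
		return -EMSGSIZE;
	*p++ = htonl(42);	/* a hypothetical argument */
	*p++ = htonl(0);	/* and a flag word */
	return 0;
}
#endif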
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
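
/*
 * Illustrative sketch (not part of the original file): the matching
 * decode-side pattern.  xdr_inline_decode() returns NULL when fewer
 * than the requested bytes remain before xdr->end, so every call must
 * be checked.  The two-word reply layout and function name below are
 * hypothetical.
 */
#if 0 /* example only, kept out of the build */
static int example_decode(struct xdr_buf *buf, uint32_t *start,
			  u32 *status, u32 *count)
{
	struct xdr_stream xdr;
	uint32_t *p;

	xdr_init_decode(&xdr, buf, start);
	p = xdr_inline_decode(&xdr, 2 * sizeof(uint32_t));
	if (p == NULL)
		return -EIO;
	*status = ntohl(*p++);
	*count = ntohl(*p++);
	return 0;
}
#endif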
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
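
/*
 * Illustrative sketch (not part of the original file): how a reply
 * whose bulk data lives in the page list is typically consumed.  The
 * caller decodes the inline words from the head, then calls
 * xdr_read_pages() with the payload length so any trailing XDR words
 * end up in the tail.  The single-count reply layout and the function
 * name are hypothetical.
 */
#if 0 /* example only, kept out of the build */
static int example_decode_with_pages(struct xdr_stream *xdr)
{
	uint32_t *p;
	u32 count;

	p = xdr_inline_decode(xdr, sizeof(uint32_t));
	if (p == NULL)
		return -EIO;
	count = ntohl(*p);
	/* align the page data at the current position; the stream is
	 * left pointing at the first word of the tail */
	xdr_read_pages(xdr, count);
	return 0;
}
#endif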
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}
/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}
int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32 raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
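
/*
 * Illustrative sketch (not part of the original file): reading a 32-bit
 * word that may straddle head, pages and tail, using xdr_decode_word().
 * The 8-byte offset and the function name are hypothetical; the helper
 * hides whether the word happens to sit in the head kvec, the page
 * list, or the tail.
 */
#if 0 /* example only, kept out of the build */
static int example_peek_word(struct xdr_buf *buf, u32 *value)
{
	/* decode the big-endian word located 8 bytes into the buffer */
	return xdr_decode_word(buf, 8, value);
}
#endif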
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32 tail_offset = buf->head[0].iov_len + buf->page_len;
	u32 obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	if (elem)
		kfree(elem);
	if (ppages)
		kunmap(*ppages);

	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
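
/*
 * Illustrative sketch (not part of the original file): decoding a
 * counted array of fixed-size elements with xdr_decode_array2().  The
 * elem_size/array_len/xcode fields are assumed to match struct
 * xdr_array2_desc as declared in linux/sunrpc/xdr.h; the callback,
 * element layout and function names here are hypothetical.
 * xdr_xcode_array2() invokes ->xcode() once per element and hands it a
 * contiguous copy even when the element straddles a page boundary.
 */
#if 0 /* example only, kept out of the build */
static int example_xcode_elem(struct xdr_array2_desc *desc, void *elem)
{
	/* each element is a single 32-bit word in this sketch */
	u32 value = ntohl(*(u32 *)elem);

	(void)value;		/* a real caller would store it somewhere */
	return 0;
}

static int example_decode_array(struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size = 4,
		.xcode = example_xcode_elem,
	};

	/* array_len is filled in from the on-the-wire element count */
	return xdr_decode_array2(buf, base, &desc);
}
#endif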