
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
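
/*
 * Wire-layout sketch (illustrative annotation, not part of the
 * original file): a 5-byte netobj { de ad be ef 01 } is encoded as
 *
 *	word 0: 0x00000005		(length, big-endian)
 *	word 1: de ad be ef		(first four data bytes)
 *	word 2: 01 00 00 00		(last byte + three zero pad bytes)
 *
 * XDR_QUADLEN(5) == 2, so the returned pointer advances two words
 * past the length word, per the RFC 1832 padding rule.
 */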
u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
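
/*
 * Worked example (illustrative annotation, not part of the original
 * file): for nbytes == 6, XDR_QUADLEN(6) == 2, so quadlen << 2 == 8
 * and padding == 2. Six data bytes are copied, two zero bytes are
 * appended, and p advances by two 32-bit words.
 */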
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}
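
/*
 * Note on the in-place NUL termination above (annotation, not in the
 * original file): when len is not a multiple of 4 there is at least
 * one XDR padding byte after the string, so the terminator can safely
 * overwrite padding. When len is a multiple of 4 there is no padding,
 * so the string is shifted back over the already-consumed length word
 * to make room for the '\0' without clobbering the following datum.
 */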
u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
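
/*
 * Padding sketch (illustrative annotation, not part of the original
 * file): with page_len == 5, len & 3 == 1 and pad == 3. The zero word
 * written at *p supplies the pad bytes, and the tail kvec is pointed
 * at the last three bytes of that word, so the buffer as a whole
 * stays 32-bit aligned.
 */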
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	int		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
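
/*
 * Traversal note (annotation, not in the original file): the copy
 * walks head[0], then the page list, then tail[0], starting 'base'
 * bytes into the logical buffer. It returns the number of bytes
 * actually copied, which may be short if the skb runs out of data,
 * or -ENOMEM if a lazily-allocated page cannot be obtained before
 * anything was copied.
 */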
int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
		struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct kvec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len  = len - base,
		};
		struct msghdr msg = {
			.msg_name    = addr,
			.msg_namelen = addrlen,
			.msg_flags   = msgflags,
		};
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;

		if (iov.iov_len != 0)
			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		else
			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct kvec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len  = len - base,
		};
		struct msghdr msg = {
			.msg_flags = msgflags,
		};
		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
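
/*
 * Address arithmetic sketch (illustrative annotation, not from the
 * original file): with 4 KB pages, the "page vector address" 9000
 * decodes as page index 9000 >> PAGE_CACHE_SHIFT == 2 and in-page
 * offset 9000 & ~PAGE_CACHE_MASK == 808, i.e. byte 808 of pages[2].
 * The loop above runs backwards from the end of both areas so that
 * overlapping ranges shift right safely, like memmove().
 */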
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
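
/*
 * Movement sketch for the two shrink helpers (annotation, not in the
 * original file): shrinking the head by 'len' shifts the page data
 * right by 'len' bytes, copies the last 'len' head bytes into the
 * space freed at the front of the pages, and spills whatever was
 * pushed past the end of the pages into the tail.
 * xdr_shrink_pagelen() below performs the analogous pages-to-tail
 * move.
 */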
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
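
#if 0
/* Usage sketch (illustrative, compiled out, not part of the original
 * file): a typical encoder reserves room, checks for overflow, then
 * fills in the reserved words. The function name, fields encoded and
 * the error code chosen are hypothetical. */
static int example_encode_pair(struct xdr_stream *xdr, u32 type, u32 count)
{
	uint32_t *p;

	p = xdr_reserve_space(xdr, 8);	/* two 32-bit words */
	if (p == NULL)
		return -EMSGSIZE;	/* no room left in the head */
	*p++ = htonl(type);
	*p++ = htonl(count);
	return 0;
}
#endif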
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
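
#if 0
/* Usage sketch (illustrative, compiled out, not part of the original
 * file): the decode-side mirror of the encode example above. The
 * stream is assumed to have been set up with xdr_init_decode(); the
 * function name and error code are hypothetical. */
static int example_decode_pair(struct xdr_stream *xdr, u32 *type, u32 *count)
{
	uint32_t *p;

	p = xdr_inline_decode(xdr, 8);	/* two 32-bit words */
	if (p == NULL)
		return -EIO;		/* short or corrupt message */
	*type = ntohl(*p++);
	*count = ntohl(*p);
	return 0;
}
#endif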
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
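
#if 0
/* Usage sketch (illustrative, compiled out, not part of the original
 * file): a READ-style reply decoder pulls the payload length out of
 * the head, then realigns the stream so that exactly 'count' bytes of
 * payload land in the page list and any trailing words can be decoded
 * from the tail. The function name is hypothetical. */
static int example_decode_read_reply(struct xdr_stream *xdr, u32 *count)
{
	uint32_t *p;

	p = xdr_inline_decode(xdr, 4);
	if (p == NULL)
		return -EIO;
	*count = ntohl(*p);
	xdr_read_pages(xdr, *count);	/* payload now aligned in pages */
	return 0;
}
#endif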
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set obj to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;	/* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;	/* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
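
#if 0
/* Usage sketch (illustrative, compiled out, not part of the original
 * file): decode an array of 4-byte elements by supplying an xcode
 * callback, which xdr_xcode_array2() invokes once per element (the
 * parameter types follow the desc->xcode(desc, elem) calls above).
 * The function names and the maxlen value are hypothetical. */
static int example_xcode_elem(struct xdr_array2_desc *desc, void *elem)
{
	/* A real callback would parse the elem_size bytes at elem here. */
	return 0;
}

static int example_decode_array(struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size    = 4,
		.array_maxlen = 256,	/* reject oversized arrays */
		.xcode        = example_xcode_elem,
	};

	return xdr_decode_array2(buf, base, &desc);
}
#endif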