xdr.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272
  1. /*
  2. * linux/net/sunrpc/xdr.c
  3. *
  4. * Generic XDR support.
  5. *
  6. * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  7. */
  8. #include <linux/module.h>
  9. #include <linux/slab.h>
  10. #include <linux/types.h>
  11. #include <linux/string.h>
  12. #include <linux/kernel.h>
  13. #include <linux/pagemap.h>
  14. #include <linux/errno.h>
  15. #include <linux/sunrpc/xdr.h>
  16. #include <linux/sunrpc/msg_prot.h>
  17. /*
  18. * XDR functions for basic NFS types
  19. */
  20. __be32 *
  21. xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
  22. {
  23. unsigned int quadlen = XDR_QUADLEN(obj->len);
  24. p[quadlen] = 0; /* zero trailing bytes */
  25. *p++ = cpu_to_be32(obj->len);
  26. memcpy(p, obj->data, obj->len);
  27. return p + XDR_QUADLEN(obj->len);
  28. }
  29. EXPORT_SYMBOL_GPL(xdr_encode_netobj);
  30. __be32 *
  31. xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
  32. {
  33. unsigned int len;
  34. if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
  35. return NULL;
  36. obj->len = len;
  37. obj->data = (u8 *) p;
  38. return p + XDR_QUADLEN(len);
  39. }
  40. EXPORT_SYMBOL_GPL(xdr_decode_netobj);
  41. /**
  42. * xdr_encode_opaque_fixed - Encode fixed length opaque data
  43. * @p: pointer to current position in XDR buffer.
  44. * @ptr: pointer to data to encode (or NULL)
  45. * @nbytes: size of data.
  46. *
  47. * Copy the array of data of length nbytes at ptr to the XDR buffer
  48. * at position p, then align to the next 32-bit boundary by padding
  49. * with zero bytes (see RFC1832).
  50. * Note: if ptr is NULL, only the padding is performed.
  51. *
  52. * Returns the updated current XDR buffer position
  53. *
  54. */
  55. __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
  56. {
  57. if (likely(nbytes != 0)) {
  58. unsigned int quadlen = XDR_QUADLEN(nbytes);
  59. unsigned int padding = (quadlen << 2) - nbytes;
  60. if (ptr != NULL)
  61. memcpy(p, ptr, nbytes);
  62. if (padding != 0)
  63. memset((char *)p + nbytes, 0, padding);
  64. p += quadlen;
  65. }
  66. return p;
  67. }
  68. EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
  69. /**
  70. * xdr_encode_opaque - Encode variable length opaque data
  71. * @p: pointer to current position in XDR buffer.
  72. * @ptr: pointer to data to encode (or NULL)
  73. * @nbytes: size of data.
  74. *
  75. * Returns the updated current XDR buffer position
  76. */
  77. __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
  78. {
  79. *p++ = cpu_to_be32(nbytes);
  80. return xdr_encode_opaque_fixed(p, ptr, nbytes);
  81. }
  82. EXPORT_SYMBOL_GPL(xdr_encode_opaque);
  83. __be32 *
  84. xdr_encode_string(__be32 *p, const char *string)
  85. {
  86. return xdr_encode_array(p, string, strlen(string));
  87. }
  88. EXPORT_SYMBOL_GPL(xdr_encode_string);
  89. __be32 *
  90. xdr_decode_string_inplace(__be32 *p, char **sp,
  91. unsigned int *lenp, unsigned int maxlen)
  92. {
  93. u32 len;
  94. len = be32_to_cpu(*p++);
  95. if (len > maxlen)
  96. return NULL;
  97. *lenp = len;
  98. *sp = (char *) p;
  99. return p + XDR_QUADLEN(len);
  100. }
  101. EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 * NOTE(review): writes to buf->pages[0] only, so this assumes the byte
 * at page_base + len lies within the first page -- confirm with callers.
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	/* Map the first page, drop a NUL just past the string, unmap */
	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
/*
 * xdr_encode_pages - hang a page vector off @xdr for sending
 * @xdr: xdr_buf to fill in
 * @pages: page vector holding the payload
 * @base: offset of first payload byte within the pages
 * @len: payload length in bytes
 *
 * Points the tail at the first quad-aligned byte after head[0] and, if
 * @len is not a multiple of 4, supplies the XDR pad bytes from a zeroed
 * word there.  buflen and len are grown by the (padded) length.
 */
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	/* Tail begins at the first quad boundary after the head data */
	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		/* Zero the word and expose only its trailing pad bytes */
		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
/*
 * xdr_inline_pages - splice a page vector into a receive buffer
 * @xdr: xdr_buf to fill in
 * @offset: byte offset in head[0] where the page data logically begins
 * @pages: page vector for the bulk data
 * @base: offset of first byte within the pages
 * @len: length of page data in bytes
 *
 * Splits head[0] at @offset: bytes before @offset remain the head, the
 * pages are inserted after it, and the remainder of the original head
 * buffer becomes the tail.
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	/* Everything after @offset in the old head becomes the tail */
	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	/* The regions may overlap with dest above source, so copy
	 * backwards: start just past the end of each area. */
	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		/* Copy at most to the start of the nearer page */
		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			/* src and dest share a page: ranges may overlap */
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		/* Copy up to the end of the current page */
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			/* Page filled: flush it and move to the next one */
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* Flush the final (possibly partial) page */
	flush_dcache_page(*pgto);
}
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		/* Copy up to the end of the current page */
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			/* Make room by sliding existing tail data up */
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;	/* head bytes land before these */
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* Open a @len-byte gap at the front of the page data... */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		/* ...and fill it with the displaced head bytes */
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Space available for the tail, beyond head and page data */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		/* Grow the tail by up to @len bytes of free space */
		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			/* Slide existing tail data up to make room */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/**
 * xdr_shift_buf - shrink buf->head[0] by @len bytes
 * @buf: xdr_buf
 * @len: bytes to move out of the head
 *
 * Thin exported wrapper around xdr_shrink_bufhead(); the displaced
 * data moves into the pages and/or tail.
 */
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	/* Head scratch space is whatever buflen doesn't reserve for
	 * page data or the tail. */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	/* Caller may already have encoded past iov_len; account for it */
	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
  438. /**
  439. * xdr_reserve_space - Reserve buffer space for sending
  440. * @xdr: pointer to xdr_stream
  441. * @nbytes: number of bytes to reserve
  442. *
  443. * Checks that we have enough buffer space to encode 'nbytes' more
  444. * bytes of data. If so, update the total xdr_buf length, and
  445. * adjust the length of the current kvec.
  446. */
  447. __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
  448. {
  449. __be32 *p = xdr->p;
  450. __be32 *q;
  451. /* align nbytes on the next 32-bit boundary */
  452. nbytes += 3;
  453. nbytes &= ~3;
  454. q = p + (nbytes >> 2);
  455. if (unlikely(q > xdr->end || q < p))
  456. return NULL;
  457. xdr->p = q;
  458. xdr->iov->iov_len += nbytes;
  459. xdr->buf->len += nbytes;
  460. return p;
  461. }
  462. EXPORT_SYMBOL_GPL(xdr_reserve_space);
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	/* Tail starts at the current encode position */
	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		/* Supply XDR pad bytes from a zeroed word; the tail
		 * base is offset so the pad overlaps that word. */
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
  494. static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
  495. __be32 *p, unsigned int len)
  496. {
  497. if (len > iov->iov_len)
  498. len = iov->iov_len;
  499. if (p == NULL)
  500. p = (__be32*)iov->iov_base;
  501. xdr->p = p;
  502. xdr->end = (__be32*)(iov->iov_base + len);
  503. xdr->iov = iov;
  504. xdr->page_ptr = NULL;
  505. }
/*
 * Position the decode stream at offset @base into the page data,
 * exposing at most @len bytes (clamped to the current page).
 *
 * Returns 0 on success, -EINVAL if @base lies beyond the page data.
 * NOTE(review): uses page_address(), so the pages are assumed to be
 * permanently mapped (no highmem kmap here) -- confirm with callers.
 */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	/* Clamp @len to the bytes actually present past @base */
	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	/* xdr->end never crosses the current page boundary */
	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
/*
 * Advance the decode stream to the page after xdr->page_ptr, falling
 * back to the tail kvec once the page data is exhausted.
 */
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	/* Offset of the next page, relative to buf->page_base */
	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
}
/*
 * Step the decode stream to the next buffer segment in the sequence
 * head -> pages -> tail.  Returns true if any bytes are available at
 * the new position.
 */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		/* Leaving the head: try pages first, then the tail */
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	/* No scratch buffer until xdr_set_scratch_buffer() is called */
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	/* Start at the first non-empty segment: head, else pages */
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, p, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
		struct page **pages, unsigned int len)
{
	/* Build a pages-only xdr_buf, then initialize normally */
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
  586. static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
  587. {
  588. __be32 *p = xdr->p;
  589. __be32 *q = p + XDR_QUADLEN(nbytes);
  590. if (unlikely(q > xdr->end || q < p))
  591. return NULL;
  592. xdr->p = q;
  593. return p;
  594. }
  595. /**
  596. * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
  597. * @xdr: pointer to xdr_stream struct
  598. * @buf: pointer to an empty buffer
  599. * @buflen: size of 'buf'
  600. *
  601. * The scratch buffer is used when decoding from an array of pages.
  602. * If an xdr_inline_decode() call spans across page boundaries, then
  603. * we copy the data into the scratch buffer in order to allow linear
  604. * access.
  605. */
  606. void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
  607. {
  608. xdr->scratch.iov_base = buf;
  609. xdr->scratch.iov_len = buflen;
  610. }
  611. EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
/*
 * Linearize an item that straddles a segment boundary by copying it
 * into xdr->scratch.  Returns a pointer into the scratch buffer, or
 * NULL if the item doesn't fit or the stream is exhausted.
 */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	/* Bytes remaining in the current segment */
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	/* First part: whatever remains of the current segment */
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	/* Second part: the start of the next segment */
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
  630. /**
  631. * xdr_inline_decode - Retrieve XDR data to decode
  632. * @xdr: pointer to xdr_stream struct
  633. * @nbytes: number of bytes of data to decode
  634. *
  635. * Check if the input buffer is long enough to enable us to decode
  636. * 'nbytes' more bytes of data starting at the current position.
  637. * If so return the current pointer, then update the current
  638. * pointer position.
  639. */
  640. __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
  641. {
  642. __be32 *p;
  643. if (nbytes == 0)
  644. return xdr->p;
  645. if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
  646. return NULL;
  647. p = __xdr_inline_decode(xdr, nbytes);
  648. if (p != NULL)
  649. return p;
  650. return xdr_copy_to_scratch(xdr, nbytes);
  651. }
  652. EXPORT_SYMBOL_GPL(xdr_inline_decode);
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;

	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	/* Align the page data at the current decode position... */
	xdr_read_pages(xdr, len);
	/*
	 * ...then point the stream at the start of the first page
	 * rather than at the tail.
	 */
	xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
  714. static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
  715. void
  716. xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
  717. {
  718. buf->head[0] = *iov;
  719. buf->tail[0] = empty_iov;
  720. buf->page_len = 0;
  721. buf->buflen = buf->len = iov->iov_len;
  722. }
  723. EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;

	/* Carve the head portion (if any) out of buf->head[0] */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	/* Carve the page portion (if any) out of buf->pages */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	/* Carve the tail portion (if any) out of buf->tail[0] */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	/* Leftover base or len means the request ran past the buffer */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
  769. static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
  770. {
  771. unsigned int this_len;
  772. this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
  773. memcpy(obj, subbuf->head[0].iov_base, this_len);
  774. len -= this_len;
  775. obj += this_len;
  776. this_len = min_t(unsigned int, len, subbuf->page_len);
  777. if (this_len)
  778. _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
  779. len -= this_len;
  780. obj += this_len;
  781. this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
  782. memcpy(obj, subbuf->tail[0].iov_base, this_len);
  783. }
  784. /* obj is assumed to point to allocated memory of size at least len: */
  785. int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
  786. {
  787. struct xdr_buf subbuf;
  788. int status;
  789. status = xdr_buf_subsegment(buf, &subbuf, base, len);
  790. if (status != 0)
  791. return status;
  792. __read_bytes_from_xdr_buf(&subbuf, obj, len);
  793. return 0;
  794. }
  795. EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
  796. static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
  797. {
  798. unsigned int this_len;
  799. this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
  800. memcpy(subbuf->head[0].iov_base, obj, this_len);
  801. len -= this_len;
  802. obj += this_len;
  803. this_len = min_t(unsigned int, len, subbuf->page_len);
  804. if (this_len)
  805. _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
  806. len -= this_len;
  807. obj += this_len;
  808. this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
  809. memcpy(subbuf->tail[0].iov_base, obj, this_len);
  810. }
  811. /* obj is assumed to point to allocated memory of size at least len: */
  812. int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
  813. {
  814. struct xdr_buf subbuf;
  815. int status;
  816. status = xdr_buf_subsegment(buf, &subbuf, base, len);
  817. if (status != 0)
  818. return status;
  819. __write_bytes_to_xdr_buf(&subbuf, obj, len);
  820. return 0;
  821. }
  822. EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
  823. int
  824. xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
  825. {
  826. __be32 raw;
  827. int status;
  828. status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
  829. if (status)
  830. return status;
  831. *obj = be32_to_cpu(raw);
  832. return 0;
  833. }
  834. EXPORT_SYMBOL_GPL(xdr_decode_word);
  835. int
  836. xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
  837. {
  838. __be32 raw = cpu_to_be32(obj);
  839. return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
  840. }
  841. EXPORT_SYMBOL_GPL(xdr_encode_word);
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	/* Wire format: 4-byte big-endian length, then that many data bytes. */
	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	/* Carve out the data portion; fails if it runs past the buffer. */
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;

	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	/* NOTE(review): space check uses the buffer's overall slack
	 * (buflen - len), not the tail's remaining room specifically —
	 * presumably the unused space all lives at the end of the tail;
	 * confirm against how callers size buf->buflen. */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	/* Flatten the (possibly head/pages/tail-split) object into the
	 * contiguous scratch area chosen above. */
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
/*
 * Walk a counted XDR array (4-byte element count followed by
 * desc->array_len fixed-size elements) that may be split across the
 * buffer's head, page array, and tail, invoking desc->xcode on each
 * element.  @encode selects direction: non-zero writes elements into
 * the buffer, zero reads them out.  Elements straddling a region or
 * page boundary are staged through a kmalloc'd bounce buffer (@elem);
 * @copied tracks how many bytes of the current partial element have
 * been staged so far.
 */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	/* Handle the leading element count; on decode, also sanity-check
	 * that the claimed array fits in the buffer and under the cap. */
	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
		    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	/* No per-element callback: nothing further to do. */
	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		/* Whole elements that fit in the head: xcode in place. */
		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		/* Partial trailing element: stage it via the bounce buffer. */
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		/* Locate the first page and in-page offset; map it. */
		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			/* Finish (or start) a partial element split across
			 * the previous region/page boundary. */
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					/* xcode fills elem once per element,
					 * then we drain it piecewise. */
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					/* Accumulate bytes until a whole
					 * element is staged, then xcode it. */
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			/* Whole elements within this page: xcode in place. */
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			/* Partial element at the end of this page: stage it. */
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			/* More data remains: advance to (and map) the next
			 * page; the previous one is unmapped first. */
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}
			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		/* Complete any element left half-staged by the pages pass. */
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		/* Remaining whole elements live contiguously in the tail. */
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;
out:
	kfree(elem);
	/* ppages still points at the last mapped page, if any. */
	if (ppages)
		kunmap(*ppages);
	return err;
}
  1057. int
  1058. xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
  1059. struct xdr_array2_desc *desc)
  1060. {
  1061. if (base >= buf->len)
  1062. return -EINVAL;
  1063. return xdr_xcode_array2(buf, base, desc, 0);
  1064. }
  1065. EXPORT_SYMBOL_GPL(xdr_decode_array2);
  1066. int
  1067. xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
  1068. struct xdr_array2_desc *desc)
  1069. {
  1070. if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
  1071. buf->head->iov_len + buf->page_len + buf->tail->iov_len)
  1072. return -EINVAL;
  1073. return xdr_xcode_array2(buf, base, desc, 1);
  1074. }
  1075. EXPORT_SYMBOL_GPL(xdr_encode_array2);
/*
 * Feed @len bytes of @buf, starting @offset bytes in, to @actor one
 * contiguous chunk at a time via a single-entry scatterlist.  Chunks are
 * visited in buffer order: head, then each page, then tail.  Stops and
 * returns @actor's value on the first non-zero return; returns -EINVAL
 * if @offset + @len runs past the end of the buffer, else 0.
 */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	/* Head: either skip it entirely or process its overlap with the
	 * requested range. */
	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	/* Page array: one actor call per page, honoring page_base and the
	 * in-page start offset of the first page. */
	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			/* Subsequent pages start at offset 0. */
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;

	/* Tail: whatever of the range remains. */
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	/* Leftover len means the request extended past the buffer. */
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);