gss_krb5_wrap.c

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif
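
/*
 * Between 1 and blocksize bytes of padding are appended, each byte holding
 * the pad count (PKCS#5-style), so a block-aligned buffer still gains a
 * full block of padding; gss_krb5_remove_padding() below relies on this.
 */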
static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}
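
/*
 * Append the pad bytes after the last byte of existing data: in the tail
 * if the buffer carries page or tail data, otherwise in the head.  Note
 * there is no bounds checking here; the chosen iovec must have room for
 * the pad bytes.
 */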
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
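
/*
 * Find the last byte of the buffer (which may live in the head, the page
 * data, or the tail), read the pad count from it, and shrink buf->len by
 * that amount.
 */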
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}
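
/*
 * Write a conflen-byte confounder at p.  conflen is the cipher blocksize
 * (8 for DES), so only the 8- and 16-byte cases are handled.
 */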
static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */
/* XXX factor out common code with seal/unseal. */
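
/*
 * Token layout written below, per rfc 1964 section 1.2.1 (offsets from
 * ptr): bytes 0-1 TOK_ID (0x02 0x01), 2-3 SGN_ALG, 4-5 SEAL_ALG, 6-7
 * filler (0xff), 8-15 encrypted sequence number, 16-23 checksum, then
 * the encrypted confounder, data, and padding.
 */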
u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx *kctx = ctx->internal_ctx_id;
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	int blocksize = 0, plainlen;
	unsigned char *ptr, *msg_start;
	s32 now;
	int headlen;
	struct page **tmp_pages;
	u32 seq_send;

	dprintk("RPC: gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	/* plaintext = confounder block + (already padded) data: */
	plainlen = blocksize + buf->len - offset;

	/* bytes to prepend in front of the data: mech token framing,
	 * krb5 header, and confounder: */
	headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
						(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + 24;

	*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);

	/* XXXJBF: UGH!: temporarily install the caller-supplied pages so
	 * make_checksum() is computed over the right data: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", ptr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
								pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
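
/*
 * Unwrap reverses wrap: verify the token header and algorithm fields,
 * decrypt in place, recompute and compare the checksum, check context
 * expiry and sequence-number direction, then slide the plaintext back
 * over the krb5 header and strip the padding.
 */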
u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx *kctx = ctx->internal_ctx_id;
	int signalg;
	int sealalg;
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	s32 now;
	int direction;
	s32 seqnum;
	unsigned char *ptr;
	int bodysize;
	void *data_start, *orig_start;
	int data_len;
	int blocksize;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */
	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr, 8, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */
	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
				&direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}