svcauth_gss.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161
  1. /*
  2. * Neil Brown <neilb@cse.unsw.edu.au>
  3. * J. Bruce Fields <bfields@umich.edu>
  4. * Andy Adamson <andros@umich.edu>
  5. * Dug Song <dugsong@monkey.org>
  6. *
  7. * RPCSEC_GSS server authentication.
  8. * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
  9. * (gssapi)
  10. *
  11. * The RPCSEC_GSS involves three stages:
  12. * 1/ context creation
  13. * 2/ data exchange
  14. * 3/ context destruction
  15. *
  16. * Context creation is handled largely by upcalls to user-space.
  17. * In particular, GSS_Accept_sec_context is handled by an upcall
  18. * Data exchange is handled entirely within the kernel
  19. * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
  20. * Context destruction is handled in-kernel
  21. * GSS_Delete_sec_context is in-kernel
  22. *
  23. * Context creation is initiated by a RPCSEC_GSS_INIT request arriving.
  24. * The context handle and gss_token are used as a key into the rpcsec_init cache.
  25. * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
  26. * being major_status, minor_status, context_handle, reply_token.
  27. * These are sent back to the client.
  28. * Sequence window management is handled by the kernel. The window size is currently
  29. * a compile time constant.
  30. *
  31. * When user-space is happy that a context is established, it places an entry
  32. * in the rpcsec_context cache. The key for this cache is the context_handle.
  33. * The content includes:
  34. * uid/gidlist - for determining access rights
  35. * mechanism type
  36. * mechanism specific information, such as a key
  37. *
  38. */
  39. #include <linux/types.h>
  40. #include <linux/module.h>
  41. #include <linux/pagemap.h>
  42. #include <linux/sunrpc/auth_gss.h>
  43. #include <linux/sunrpc/svcauth.h>
  44. #include <linux/sunrpc/gss_err.h>
  45. #include <linux/sunrpc/svcauth.h>
  46. #include <linux/sunrpc/svcauth_gss.h>
  47. #include <linux/sunrpc/cache.h>
  48. #ifdef RPC_DEBUG
  49. # define RPCDBG_FACILITY RPCDBG_AUTH
  50. #endif
  51. /* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
  52. * into replies.
  53. *
  54. * Key is context handle (\x if empty) and gss_token.
  55. * Content is major_status minor_status (integers) context_handle, reply_token.
  56. *
  57. */
  58. static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
  59. {
  60. return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
  61. }
  62. #define RSI_HASHBITS 6
  63. #define RSI_HASHMAX (1<<RSI_HASHBITS)
  64. #define RSI_HASHMASK (RSI_HASHMAX-1)
/* One entry in the rpcsec_init cache: maps an RPCSEC_GSS_{,CONT_}INIT
 * request to the reply produced by the user-space upcall. */
struct rsi {
	struct cache_head	h;
	struct xdr_netobj	in_handle, in_token;	/* lookup key */
	struct xdr_netobj	out_handle, out_token;	/* upcall results */
	int			major_status, minor_status;	/* GSS status codes from the upcall */
};
  71. static struct cache_head *rsi_table[RSI_HASHMAX];
  72. static struct cache_detail rsi_cache;
  73. static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
  74. static struct rsi *rsi_lookup(struct rsi *item);
  75. static void rsi_free(struct rsi *rsii)
  76. {
  77. kfree(rsii->in_handle.data);
  78. kfree(rsii->in_token.data);
  79. kfree(rsii->out_handle.data);
  80. kfree(rsii->out_token.data);
  81. }
/* Drop one reference; free the entry when the last reference goes away
 * (cache_put returns nonzero only for the final put). */
static void rsi_put(struct cache_head *item, struct cache_detail *cd)
{
	struct rsi *rsii = container_of(item, struct rsi, h);
	if (cache_put(item, cd)) {
		rsi_free(rsii);
		kfree(rsii);
	}
}
/* Hash over both key fields (in_handle, in_token), folded to RSI_HASHBITS. */
static inline int rsi_hash(struct rsi *item)
{
	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
	     ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
}
  95. static int rsi_match(struct cache_head *a, struct cache_head *b)
  96. {
  97. struct rsi *item = container_of(a, struct rsi, h);
  98. struct rsi *tmp = container_of(b, struct rsi, h);
  99. return netobj_equal(&item->in_handle, &tmp->in_handle)
  100. && netobj_equal(&item->in_token, &tmp->in_token);
  101. }
  102. static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
  103. {
  104. dst->len = len;
  105. dst->data = (len ? kmalloc(len, GFP_KERNEL) : NULL);
  106. if (dst->data)
  107. memcpy(dst->data, src, len);
  108. if (len && !dst->data)
  109. return -ENOMEM;
  110. return 0;
  111. }
/* Deep-copy one netobj into another; see dup_to_netobj for semantics. */
static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
{
	return dup_to_netobj(dst, src->data, src->len);
}
/* Cache init callback: move the key fields (in_handle/in_token) from the
 * template item into the new entry.  Ownership of the buffers is *stolen*
 * (item's pointers are NULLed) so that rsi_free on the template does not
 * free memory now owned by the cached entry. */
static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct rsi *new = container_of(cnew, struct rsi, h);
	struct rsi *item = container_of(citem, struct rsi, h);

	new->out_handle.data = NULL;
	new->out_handle.len = 0;
	new->out_token.data = NULL;
	new->out_token.len = 0;
	new->in_handle.len = item->in_handle.len;
	item->in_handle.len = 0;
	new->in_token.len = item->in_token.len;
	item->in_token.len = 0;
	new->in_handle.data = item->in_handle.data;
	item->in_handle.data = NULL;
	new->in_token.data = item->in_token.data;
	item->in_token.data = NULL;
}
/* Cache update callback: move the upcall results (out_handle/out_token,
 * major/minor status) into the cached entry, stealing buffer ownership
 * from the template the same way rsi_init does for the key fields. */
static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
	struct rsi *new = container_of(cnew, struct rsi, h);
	struct rsi *item = container_of(citem, struct rsi, h);

	/* The entry must not already carry results. */
	BUG_ON(new->out_handle.data || new->out_token.data);
	new->out_handle.len = item->out_handle.len;
	item->out_handle.len = 0;
	new->out_token.len = item->out_token.len;
	item->out_token.len = 0;
	new->out_handle.data = item->out_handle.data;
	item->out_handle.data = NULL;
	new->out_token.data = item->out_token.data;
	item->out_token.data = NULL;

	new->major_status = item->major_status;
	new->minor_status = item->minor_status;
}
  149. static struct cache_head *rsi_alloc(void)
  150. {
  151. struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
  152. if (rsii)
  153. return &rsii->h;
  154. else
  155. return NULL;
  156. }
/* Format the upcall request for user space: the two key fields as hex
 * words, terminated by a newline (overwriting the trailing space that
 * qword_addhex leaves behind). */
static void rsi_request(struct cache_detail *cd,
		       struct cache_head *h,
		       char **bpp, int *blen)
{
	struct rsi *rsii = container_of(h, struct rsi, h);

	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
	(*bpp)[-1] = '\n';
}
/* Parse the user-space reply written to the rpcsec_init cache channel
 * and fold it into the matching cache entry.
 *
 * Expected line: handle token expiry major minor out_handle out_token
 * Returns 0 on success, -EINVAL on malformed input, -ENOMEM on
 * allocation or lookup failure. */
static int rsi_parse(struct cache_detail *cd,
		    char *mesg, int mlen)
{
	/* context token expiry major minor context token */
	char *buf = mesg;
	char *ep;
	int len;
	struct rsi rsii, *rsip = NULL;
	time_t expiry;
	int status = -EINVAL;

	memset(&rsii, 0, sizeof(rsii));
	/* handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_handle, buf, len))
		goto out;

	/* token */
	len = qword_get(&mesg, buf, mlen);
	status = -EINVAL;
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_token, buf, len))
		goto out;

	/* Find (or create) the entry this reply belongs to.  Done before
	 * parsing the results so a malformed tail still leaves the key
	 * cached. */
	rsip = rsi_lookup(&rsii);
	if (!rsip)
		goto out;

	rsii.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	/* major/minor */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	if (len == 0) {
		goto out;
	} else {
		rsii.major_status = simple_strtoul(buf, &ep, 10);
		if (*ep)
			goto out;
		len = qword_get(&mesg, buf, mlen);
		if (len <= 0)
			goto out;
		rsii.minor_status = simple_strtoul(buf, &ep, 10);
		if (*ep)
			goto out;

		/* out_handle */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		status = -ENOMEM;
		if (dup_to_netobj(&rsii.out_handle, buf, len))
			goto out;

		/* out_token */
		len = qword_get(&mesg, buf, mlen);
		status = -EINVAL;
		if (len < 0)
			goto out;
		status = -ENOMEM;
		if (dup_to_netobj(&rsii.out_token, buf, len))
			goto out;
	}
	rsii.h.expiry_time = expiry;
	/* rsi_update steals the out_* buffers into the cached entry. */
	rsip = rsi_update(&rsii, rsip);
	status = 0;
out:
	rsi_free(&rsii);
	if (rsip)
		rsi_put(&rsip->h, &rsi_cache);
	else
		/* NOTE(review): this overwrites any earlier status (e.g.
		 * -EINVAL) with -ENOMEM whenever the lookup/update failed. */
		status = -ENOMEM;
	return status;
}
/* Cache wiring for the rpcsec_init upcall channel ("auth.rpcsec.init"):
 * rsi_request formats the upcall, rsi_parse digests the reply. */
static struct cache_detail rsi_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= RSI_HASHMAX,
	.hash_table     = rsi_table,
	.name           = "auth.rpcsec.init",
	.cache_put      = rsi_put,
	.cache_request  = rsi_request,
	.cache_parse    = rsi_parse,
	.match		= rsi_match,
	.init		= rsi_init,
	.update		= update_rsi,
	.alloc		= rsi_alloc,
};
  257. static struct rsi *rsi_lookup(struct rsi *item)
  258. {
  259. struct cache_head *ch;
  260. int hash = rsi_hash(item);
  261. ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
  262. if (ch)
  263. return container_of(ch, struct rsi, h);
  264. else
  265. return NULL;
  266. }
  267. static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
  268. {
  269. struct cache_head *ch;
  270. int hash = rsi_hash(new);
  271. ch = sunrpc_cache_update(&rsi_cache, &new->h,
  272. &old->h, hash);
  273. if (ch)
  274. return container_of(ch, struct rsi, h);
  275. else
  276. return NULL;
  277. }
  278. /*
  279. * The rpcsec_context cache is used to store a context that is
  280. * used in data exchange.
  281. * The key is a context handle. The content is:
  282. * uid, gidlist, mechanism, service-set, mech-specific-data
  283. */
  284. #define RSC_HASHBITS 10
  285. #define RSC_HASHMAX (1<<RSC_HASHBITS)
  286. #define RSC_HASHMASK (RSC_HASHMAX-1)
  287. #define GSS_SEQ_WIN 128
/* Per-context replay-detection state (RFC 2203 sequence window). */
struct gss_svc_seq_data {
	/* highest seq number seen so far: */
	int			sd_max;
	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
	 * sd_win is nonzero iff sequence number i has been seen already: */
	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
	/* protects sd_max and sd_win: */
	spinlock_t		sd_lock;
};
/* One established security context, keyed by its opaque handle.
 * Populated by user space via the rpcsec_context cache channel. */
struct rsc {
	struct cache_head	h;
	struct xdr_netobj	handle;		/* lookup key */
	struct svc_cred		cred;		/* uid/gid(s) for access checks */
	struct gss_svc_seq_data	seqdata;	/* replay window */
	struct gss_ctx		*mechctx;	/* mechanism-specific context */
};
  303. static struct cache_head *rsc_table[RSC_HASHMAX];
  304. static struct cache_detail rsc_cache;
  305. static struct rsc *rsc_lookup(struct rsc *item, int set);
/* Release everything an rsc owns: handle buffer, mechanism context and
 * group list.  The struct itself is the caller's to free. */
static void rsc_free(struct rsc *rsci)
{
	kfree(rsci->handle.data);
	if (rsci->mechctx)
		gss_delete_sec_context(&rsci->mechctx);
	if (rsci->cred.cr_group_info)
		put_group_info(rsci->cred.cr_group_info);
}
  314. static void rsc_put(struct cache_head *item, struct cache_detail *cd)
  315. {
  316. struct rsc *rsci = container_of(item, struct rsc, h);
  317. if (cache_put(item, cd)) {
  318. rsc_free(rsci);
  319. kfree(rsci);
  320. }
  321. }
/* Hash the opaque context handle, folded to RSC_HASHBITS. */
static inline int
rsc_hash(struct rsc *rsci)
{
	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
}
/* Contexts are keyed solely by their opaque handle. */
static inline int
rsc_match(struct rsc *new, struct rsc *tmp)
{
	return netobj_equal(&new->handle, &tmp->handle);
}
/* Move the handle from the template into the new entry (stealing the
 * buffer so the template's rsc_free won't touch it) and clear the
 * fields that .update fills in later. */
static inline void
rsc_init(struct rsc *new, struct rsc *tmp)
{
	new->handle.len = tmp->handle.len;
	tmp->handle.len = 0;
	new->handle.data = tmp->handle.data;
	tmp->handle.data = NULL;
	new->mechctx = NULL;
	new->cred.cr_group_info = NULL;
}
/* Transfer mechanism context and credentials from the template into the
 * cached entry (ownership stolen, template pointers NULLed) and reset
 * the replay window for the fresh context. */
static inline void
rsc_update(struct rsc *new, struct rsc *tmp)
{
	new->mechctx = tmp->mechctx;
	tmp->mechctx = NULL;
	memset(&new->seqdata, 0, sizeof(new->seqdata));
	spin_lock_init(&new->seqdata.sd_lock);
	new->cred = tmp->cred;
	tmp->cred.cr_group_info = NULL;
}
  352. static int rsc_parse(struct cache_detail *cd,
  353. char *mesg, int mlen)
  354. {
  355. /* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
  356. char *buf = mesg;
  357. int len, rv;
  358. struct rsc rsci, *rscp = NULL;
  359. time_t expiry;
  360. int status = -EINVAL;
  361. memset(&rsci, 0, sizeof(rsci));
  362. /* context handle */
  363. len = qword_get(&mesg, buf, mlen);
  364. if (len < 0) goto out;
  365. status = -ENOMEM;
  366. if (dup_to_netobj(&rsci.handle, buf, len))
  367. goto out;
  368. rsci.h.flags = 0;
  369. /* expiry */
  370. expiry = get_expiry(&mesg);
  371. status = -EINVAL;
  372. if (expiry == 0)
  373. goto out;
  374. /* uid, or NEGATIVE */
  375. rv = get_int(&mesg, &rsci.cred.cr_uid);
  376. if (rv == -EINVAL)
  377. goto out;
  378. if (rv == -ENOENT)
  379. set_bit(CACHE_NEGATIVE, &rsci.h.flags);
  380. else {
  381. int N, i;
  382. struct gss_api_mech *gm;
  383. /* gid */
  384. if (get_int(&mesg, &rsci.cred.cr_gid))
  385. goto out;
  386. /* number of additional gid's */
  387. if (get_int(&mesg, &N))
  388. goto out;
  389. status = -ENOMEM;
  390. rsci.cred.cr_group_info = groups_alloc(N);
  391. if (rsci.cred.cr_group_info == NULL)
  392. goto out;
  393. /* gid's */
  394. status = -EINVAL;
  395. for (i=0; i<N; i++) {
  396. gid_t gid;
  397. if (get_int(&mesg, &gid))
  398. goto out;
  399. GROUP_AT(rsci.cred.cr_group_info, i) = gid;
  400. }
  401. /* mech name */
  402. len = qword_get(&mesg, buf, mlen);
  403. if (len < 0)
  404. goto out;
  405. gm = gss_mech_get_by_name(buf);
  406. status = -EOPNOTSUPP;
  407. if (!gm)
  408. goto out;
  409. status = -EINVAL;
  410. /* mech-specific data: */
  411. len = qword_get(&mesg, buf, mlen);
  412. if (len < 0) {
  413. gss_mech_put(gm);
  414. goto out;
  415. }
  416. status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
  417. if (status) {
  418. gss_mech_put(gm);
  419. goto out;
  420. }
  421. gss_mech_put(gm);
  422. }
  423. rsci.h.expiry_time = expiry;
  424. rscp = rsc_lookup(&rsci, 1);
  425. status = 0;
  426. out:
  427. rsc_free(&rsci);
  428. if (rscp)
  429. rsc_put(&rscp->h, &rsc_cache);
  430. return status;
  431. }
/* Cache wiring for established contexts ("auth.rpcsec.context").
 * No .cache_request: entries are only ever pushed down by user space. */
static struct cache_detail rsc_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= RSC_HASHMAX,
	.hash_table	= rsc_table,
	.name		= "auth.rpcsec.context",
	.cache_put	= rsc_put,
	.cache_parse	= rsc_parse,
};
  440. static DefineSimpleCacheLookup(rsc, rsc);
/* Find the valid, non-expired context for the given handle.
 * On success returns the entry with a reference held (the caller must
 * release it with rsc_put); returns NULL if the entry is absent or
 * cache_check rejects it.
 * NOTE(review): no rsc_put on the cache_check failure path — this
 * assumes cache_check consumes the reference on error; verify against
 * the cache.c of this kernel version. */
static struct rsc *
gss_svc_searchbyctx(struct xdr_netobj *handle)
{
	struct rsc rsci;
	struct rsc *found;

	memset(&rsci, 0, sizeof(rsci));
	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
		return NULL;
	found = rsc_lookup(&rsci, 0);
	rsc_free(&rsci);
	if (!found)
		return NULL;
	if (cache_check(&rsc_cache, &found->h, NULL))
		return NULL;
	return found;
}
/* Implements sequence number algorithm as specified in RFC 2203.
 * Returns 1 if seq_num is new (accept the request), 0 if it is a
 * replay or too old to track (drop it). */
static int
gss_check_seq_num(struct rsc *rsci, int seq_num)
{
	struct gss_svc_seq_data *sd = &rsci->seqdata;

	/* sd_lock serialises all window updates for this context. */
	spin_lock(&sd->sd_lock);
	if (seq_num > sd->sd_max) {
		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
			/* Jumped past the whole window: start afresh. */
			memset(sd->sd_win,0,sizeof(sd->sd_win));
			sd->sd_max = seq_num;
		} else while (sd->sd_max < seq_num) {
			/* Slide forward, clearing bits that fall out. */
			sd->sd_max++;
			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
		}
		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
		goto ok;
	} else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
		/* Below the window: cannot tell replay from stale; drop. */
		goto drop;
	}
	/* sd_max - GSS_SEQ_WIN < seq_num <= sd_max */
	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
		/* Bit already set: duplicate within the window. */
		goto drop;
ok:
	spin_unlock(&sd->sd_lock);
	return 1;
drop:
	spin_unlock(&sd->sd_lock);
	return 0;
}
  486. static inline u32 round_up_to_quad(u32 i)
  487. {
  488. return (i + 3 ) & ~3;
  489. }
/* Pull a length-prefixed opaque (netobj) out of the argument kvec,
 * advancing the kvec past the quad-aligned payload.  The returned
 * o->data points into the kvec; nothing is copied.
 * Returns 0 on success, -1 if the kvec is too short.
 * NOTE(review): o->len comes off the wire; the iov_len bound check is
 * the only validation — confirm 'int l' cannot mis-compare against
 * iov_len for hostile lengths on this kernel's type of iov_len. */
static inline int
svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
{
	int l;

	if (argv->iov_len < 4)
		return -1;
	o->len = ntohl(svc_getu32(argv));
	l = round_up_to_quad(o->len);
	if (argv->iov_len < l)
		return -1;
	o->data = argv->iov_base;
	argv->iov_base += l;
	argv->iov_len -= l;
	return 0;
}
/* Append a length-prefixed opaque (netobj) to the response kvec,
 * zero-padding up to the quad boundary.  Returns 0 on success, -1 if
 * the result would overflow the response page. */
static inline int
svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
{
	u32 *p;

	if (resv->iov_len + 4 > PAGE_SIZE)
		return -1;
	svc_putu32(resv, htonl(o->len));
	p = resv->iov_base + resv->iov_len;
	resv->iov_len += round_up_to_quad(o->len);
	if (resv->iov_len > PAGE_SIZE)
		return -1;
	memcpy(p, o->data, o->len);
	/* zero the alignment padding */
	memset((u8 *)p + o->len, 0, round_up_to_quad(o->len) - o->len);
	return 0;
}
/* Verify the checksum on the header and return SVC_OK on success.
 * Otherwise, return SVC_DROP (in the case of a bad sequence number)
 * or return SVC_DENIED and indicate error in authp.
 */
static int
gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
		u32 *rpcstart, struct rpc_gss_wire_cred *gc, u32 *authp)
{
	struct gss_ctx *ctx_id = rsci->mechctx;
	struct xdr_buf rpchdr;
	struct xdr_netobj checksum;
	u32 flavor = 0;
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec iov;

	/* data to compute the checksum over: everything from the start of
	 * the rpc packet (rpcstart) up to the current read position */
	iov.iov_base = rpcstart;
	iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;
	xdr_buf_from_iov(&iov, &rpchdr);

	*authp = rpc_autherr_badverf;
	if (argv->iov_len < 4)
		return SVC_DENIED;
	flavor = ntohl(svc_getu32(argv));
	if (flavor != RPC_AUTH_GSS)
		return SVC_DENIED;
	if (svc_safe_getnetobj(argv, &checksum))
		return SVC_DENIED;

	if (rqstp->rq_deferred) /* skip verification of revisited request */
		return SVC_OK;
	if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
		*authp = rpcsec_gsserr_credproblem;
		return SVC_DENIED;
	}

	if (gc->gc_seq > MAXSEQ) {
		dprintk("RPC: svcauth_gss: discarding request with large sequence number %d\n",
			gc->gc_seq);
		*authp = rpcsec_gsserr_ctxproblem;
		return SVC_DENIED;
	}
	/* replay / out-of-window check; a failure here is a silent drop */
	if (!gss_check_seq_num(rsci, gc->gc_seq)) {
		dprintk("RPC: svcauth_gss: discarding request with old sequence number %d\n",
			gc->gc_seq);
		return SVC_DROP;
	}
	return SVC_OK;
}
/* Emit an AUTH_NULL verifier (flavor RPC_AUTH_NULL, zero-length body)
 * into the response head.  Used for INIT replies when no context is
 * available to compute a real MIC.  Returns 0 on success, -1 if the
 * response would overflow. */
static int
gss_write_null_verf(struct svc_rqst *rqstp)
{
	u32 *p;

	svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_NULL));
	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
	/* don't really need to check if head->iov_len > PAGE_SIZE ... */
	*p++ = 0;	/* zero-length verifier body */
	if (!xdr_ressize_check(rqstp, p))
		return -1;
	return 0;
}
/* Emit an RPC_AUTH_GSS verifier: a MIC computed over the XDR-encoded
 * sequence number, written length-prefixed and quad-padded into the
 * response head.  Returns 0 on success, -1 on MIC or size failure.
 * NOTE(review): the MIC is written directly at p+1 before its length
 * is known — assumes gss_get_mic writes at most RPC_MAX_AUTH_SIZE and
 * the head has room; confirm against the caller's reservations. */
static int
gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
	u32 xdr_seq;
	u32 maj_stat;
	struct xdr_buf verf_data;
	struct xdr_netobj mic;
	u32 *p;
	struct kvec iov;

	svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_GSS));
	xdr_seq = htonl(seq);

	/* the data checksummed is just the sequence number: */
	iov.iov_base = &xdr_seq;
	iov.iov_len = sizeof(xdr_seq);
	xdr_buf_from_iov(&iov, &verf_data);
	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
	mic.data = (u8 *)(p + 1);	/* MIC body goes after the length word */
	maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
	if (maj_stat != GSS_S_COMPLETE)
		return -1;
	*p++ = htonl(mic.len);
	/* zero the quad-alignment padding after the MIC */
	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
	p += XDR_QUADLEN(mic.len);
	if (!xdr_ressize_check(rqstp, p))
		return -1;
	return 0;
}
/* An auth_domain specialised for RPCSEC_GSS, carrying the pseudoflavor
 * it was registered under. */
struct gss_domain {
	struct auth_domain	h;
	u32			pseudoflavor;
};
/* Map (mechanism, service) to the registered auth_domain, or NULL if
 * the combination has no name or no domain was registered for it. */
static struct auth_domain *
find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
{
	char *name;

	name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
	if (!name)
		return NULL;
	return auth_domain_find(name);
}
  616. static struct auth_ops svcauthops_gss;
/* Register an auth_domain for the given pseudoflavor under the given
 * name, so that RPCSEC_GSS requests using it can be mapped to a client.
 * Returns 0 on success, -ENOMEM on allocation failure.
 * NOTE(review): a duplicate registration also returns -ENOMEM (stat is
 * never set to anything else) and, per the existing comment, leaves a
 * dangling reference — both are pre-existing quirks, not changed here. */
int
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
	struct gss_domain *new;
	struct auth_domain *test;
	int stat = -ENOMEM;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out;
	kref_init(&new->h.ref);
	new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
	if (!new->h.name)
		goto out_free_dom;
	strcpy(new->h.name, name);
	new->h.flavour = &svcauthops_gss;
	new->pseudoflavor = pseudoflavor;

	test = auth_domain_lookup(name, &new->h);
	if (test != &new->h) { /* XXX Duplicate registration? */
		auth_domain_put(&new->h);
		/* dangling ref-count... */
		goto out;
	}
	return 0;

out_free_dom:
	kfree(new);
out:
	return stat;
}
  645. EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
  646. static inline int
  647. read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
  648. {
  649. u32 raw;
  650. int status;
  651. status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
  652. if (status)
  653. return status;
  654. *obj = ntohl(raw);
  655. return 0;
  656. }
  657. /* It would be nice if this bit of code could be shared with the client.
  658. * Obstacles:
  659. * The client shouldn't malloc(), would have to pass in own memory.
  660. * The server uses base of head iovec as read pointer, while the
  661. * client uses separate pointer. */
  662. static int
  663. unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
  664. {
  665. int stat = -EINVAL;
  666. u32 integ_len, maj_stat;
  667. struct xdr_netobj mic;
  668. struct xdr_buf integ_buf;
  669. integ_len = ntohl(svc_getu32(&buf->head[0]));
  670. if (integ_len & 3)
  671. goto out;
  672. if (integ_len > buf->len)
  673. goto out;
  674. if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
  675. BUG();
  676. /* copy out mic... */
  677. if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
  678. BUG();
  679. if (mic.len > RPC_MAX_AUTH_SIZE)
  680. goto out;
  681. mic.data = kmalloc(mic.len, GFP_KERNEL);
  682. if (!mic.data)
  683. goto out;
  684. if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
  685. goto out;
  686. maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
  687. if (maj_stat != GSS_S_COMPLETE)
  688. goto out;
  689. if (ntohl(svc_getu32(&buf->head[0])) != seq)
  690. goto out;
  691. stat = 0;
  692. out:
  693. return stat;
  694. }
/* Per-request authentication state, hung off rqstp->rq_auth_data. */
struct gss_svc_data {
	/* decoded gss client cred: */
	struct rpc_gss_wire_cred	clcred;
	/* pointer to the beginning of the procedure-specific results,
	 * which may be encrypted/checksummed in svcauth_gss_release: */
	u32				*body_start;
	struct rsc			*rsci;	/* context used by this request */
};
/* Resolve rqstp->rq_client from the request's mechanism and service.
 * Returns SVC_OK, or SVC_DENIED when no auth_domain is registered for
 * that combination. */
static int
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
	struct rsc *rsci = svcdata->rsci;
	struct rpc_gss_wire_cred *gc = &svcdata->clcred;

	rqstp->rq_client = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
	if (rqstp->rq_client == NULL)
		return SVC_DENIED;
	return SVC_OK;
}
  714. static inline int
  715. gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
  716. {
  717. struct rsc *rsci;
  718. if (rsip->major_status != GSS_S_COMPLETE)
  719. return gss_write_null_verf(rqstp);
  720. rsci = gss_svc_searchbyctx(&rsip->out_handle);
  721. if (rsci == NULL) {
  722. rsip->major_status = GSS_S_NO_CONTEXT;
  723. return gss_write_null_verf(rqstp);
  724. }
  725. return gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
  726. }
  727. /*
  728. * Accept an rpcsec packet.
  729. * If context establishment, punt to user space
  730. * If data exchange, verify/decrypt
  731. * If context destruction, handle here
  732. * In the context establishment and destruction case we encode
  733. * response here and return SVC_COMPLETE.
  734. */
  735. static int
  736. svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
  737. {
  738. struct kvec *argv = &rqstp->rq_arg.head[0];
  739. struct kvec *resv = &rqstp->rq_res.head[0];
  740. u32 crlen;
  741. struct xdr_netobj tmpobj;
  742. struct gss_svc_data *svcdata = rqstp->rq_auth_data;
  743. struct rpc_gss_wire_cred *gc;
  744. struct rsc *rsci = NULL;
  745. struct rsi *rsip, rsikey;
  746. u32 *rpcstart;
  747. u32 *reject_stat = resv->iov_base + resv->iov_len;
  748. int ret;
  749. dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len);
  750. *authp = rpc_autherr_badcred;
  751. if (!svcdata)
  752. svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
  753. if (!svcdata)
  754. goto auth_err;
  755. rqstp->rq_auth_data = svcdata;
  756. svcdata->body_start = NULL;
  757. svcdata->rsci = NULL;
  758. gc = &svcdata->clcred;
  759. /* start of rpc packet is 7 u32's back from here:
  760. * xid direction rpcversion prog vers proc flavour
  761. */
  762. rpcstart = argv->iov_base;
  763. rpcstart -= 7;
  764. /* credential is:
  765. * version(==1), proc(0,1,2,3), seq, service (1,2,3), handle
  766. * at least 5 u32s, and is preceeded by length, so that makes 6.
  767. */
  768. if (argv->iov_len < 5 * 4)
  769. goto auth_err;
  770. crlen = ntohl(svc_getu32(argv));
  771. if (ntohl(svc_getu32(argv)) != RPC_GSS_VERSION)
  772. goto auth_err;
  773. gc->gc_proc = ntohl(svc_getu32(argv));
  774. gc->gc_seq = ntohl(svc_getu32(argv));
  775. gc->gc_svc = ntohl(svc_getu32(argv));
  776. if (svc_safe_getnetobj(argv, &gc->gc_ctx))
  777. goto auth_err;
  778. if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
  779. goto auth_err;
  780. if ((gc->gc_proc != RPC_GSS_PROC_DATA) && (rqstp->rq_proc != 0))
  781. goto auth_err;
  782. /*
  783. * We've successfully parsed the credential. Let's check out the
  784. * verifier. An AUTH_NULL verifier is allowed (and required) for
  785. * INIT and CONTINUE_INIT requests. AUTH_RPCSEC_GSS is required for
  786. * PROC_DATA and PROC_DESTROY.
  787. *
  788. * AUTH_NULL verifier is 0 (AUTH_NULL), 0 (length).
  789. * AUTH_RPCSEC_GSS verifier is:
  790. * 6 (AUTH_RPCSEC_GSS), length, checksum.
  791. * checksum is calculated over rpcheader from xid up to here.
  792. */
  793. *authp = rpc_autherr_badverf;
  794. switch (gc->gc_proc) {
  795. case RPC_GSS_PROC_INIT:
  796. case RPC_GSS_PROC_CONTINUE_INIT:
  797. if (argv->iov_len < 2 * 4)
  798. goto auth_err;
  799. if (ntohl(svc_getu32(argv)) != RPC_AUTH_NULL)
  800. goto auth_err;
  801. if (ntohl(svc_getu32(argv)) != 0)
  802. goto auth_err;
  803. break;
  804. case RPC_GSS_PROC_DATA:
  805. case RPC_GSS_PROC_DESTROY:
  806. *authp = rpcsec_gsserr_credproblem;
  807. rsci = gss_svc_searchbyctx(&gc->gc_ctx);
  808. if (!rsci)
  809. goto auth_err;
  810. switch (gss_verify_header(rqstp, rsci, rpcstart, gc, authp)) {
  811. case SVC_OK:
  812. break;
  813. case SVC_DENIED:
  814. goto auth_err;
  815. case SVC_DROP:
  816. goto drop;
  817. }
  818. break;
  819. default:
  820. *authp = rpc_autherr_rejectedcred;
  821. goto auth_err;
  822. }
  823. /* now act upon the command: */
  824. switch (gc->gc_proc) {
  825. case RPC_GSS_PROC_INIT:
  826. case RPC_GSS_PROC_CONTINUE_INIT:
  827. *authp = rpc_autherr_badcred;
  828. if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
  829. goto auth_err;
  830. memset(&rsikey, 0, sizeof(rsikey));
  831. if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
  832. goto drop;
  833. *authp = rpc_autherr_badverf;
  834. if (svc_safe_getnetobj(argv, &tmpobj)) {
  835. kfree(rsikey.in_handle.data);
  836. goto auth_err;
  837. }
  838. if (dup_netobj(&rsikey.in_token, &tmpobj)) {
  839. kfree(rsikey.in_handle.data);
  840. goto drop;
  841. }
  842. rsip = rsi_lookup(&rsikey);
  843. rsi_free(&rsikey);
  844. if (!rsip) {
  845. goto drop;
  846. }
  847. switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
  848. case -EAGAIN:
  849. goto drop;
  850. case -ENOENT:
  851. goto drop;
  852. case 0:
  853. if (gss_write_init_verf(rqstp, rsip))
  854. goto drop;
  855. if (resv->iov_len + 4 > PAGE_SIZE)
  856. goto drop;
  857. svc_putu32(resv, rpc_success);
  858. if (svc_safe_putnetobj(resv, &rsip->out_handle))
  859. goto drop;
  860. if (resv->iov_len + 3 * 4 > PAGE_SIZE)
  861. goto drop;
  862. svc_putu32(resv, htonl(rsip->major_status));
  863. svc_putu32(resv, htonl(rsip->minor_status));
  864. svc_putu32(resv, htonl(GSS_SEQ_WIN));
  865. if (svc_safe_putnetobj(resv, &rsip->out_token))
  866. goto drop;
  867. rqstp->rq_client = NULL;
  868. }
  869. goto complete;
  870. case RPC_GSS_PROC_DESTROY:
  871. set_bit(CACHE_NEGATIVE, &rsci->h.flags);
  872. if (resv->iov_len + 4 > PAGE_SIZE)
  873. goto drop;
  874. svc_putu32(resv, rpc_success);
  875. goto complete;
  876. case RPC_GSS_PROC_DATA:
  877. *authp = rpcsec_gsserr_ctxproblem;
  878. if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
  879. goto auth_err;
  880. rqstp->rq_cred = rsci->cred;
  881. get_group_info(rsci->cred.cr_group_info);
  882. *authp = rpc_autherr_badcred;
  883. switch (gc->gc_svc) {
  884. case RPC_GSS_SVC_NONE:
  885. break;
  886. case RPC_GSS_SVC_INTEGRITY:
  887. if (unwrap_integ_data(&rqstp->rq_arg,
  888. gc->gc_seq, rsci->mechctx))
  889. goto auth_err;
  890. /* placeholders for length and seq. number: */
  891. svcdata->body_start = resv->iov_base + resv->iov_len;
  892. svc_putu32(resv, 0);
  893. svc_putu32(resv, 0);
  894. break;
  895. case RPC_GSS_SVC_PRIVACY:
  896. /* currently unsupported */
  897. default:
  898. goto auth_err;
  899. }
  900. svcdata->rsci = rsci;
  901. cache_get(&rsci->h);
  902. ret = SVC_OK;
  903. goto out;
  904. }
  905. auth_err:
  906. /* Restore write pointer to original value: */
  907. xdr_ressize_check(rqstp, reject_stat);
  908. ret = SVC_DENIED;
  909. goto out;
  910. complete:
  911. ret = SVC_COMPLETE;
  912. goto out;
  913. drop:
  914. ret = SVC_DROP;
  915. out:
  916. if (rsci)
  917. rsc_put(&rsci->h, &rsc_cache);
  918. return ret;
  919. }
/*
 * Reply post-processing hook (auth_ops->release), run before svc_send():
 * for RPC_GSS_SVC_INTEGRITY replies, wrap the accepted response body in
 * the integrity format (length, sequence number, body, MIC) expected by
 * the RPCSEC_GSS protocol, then drop the per-request references taken by
 * the accept path (client auth_domain, group_info, rsc cache entry).
 * Returns 0 on success, -EINVAL if wrapping fails.
 */
static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	struct kvec *resv;
	u32 *p;
	int integ_offset, integ_len;
	int stat = -EINVAL;

	/* Only PROC_DATA replies carry a body that may need wrapping. */
	if (gc->gc_proc != RPC_GSS_PROC_DATA)
		goto out;
	/* Release can be called twice, but we only wrap once. */
	if (gsd->body_start == NULL)
		goto out;
	/* normally not set till svc_send, but we need it here: */
	resbuf->len = resbuf->head[0].iov_len
		+ resbuf->page_len + resbuf->tail[0].iov_len;
	switch (gc->gc_svc) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		/* body_start points at the two placeholder words reserved by
		 * the accept path, just before the accept_stat word. */
		p = gsd->body_start;
		gsd->body_start = NULL;
		/* move accept_stat to right place: */
		memcpy(p, p + 2, 4);
		/* don't wrap in failure case: */
		/* Note: counting on not getting here if call was not even
		 * accepted! */
		if (*p != rpc_success) {
			/* give back the two placeholder words */
			resbuf->head[0].iov_len -= 2 * 4;
			goto out;
		}
		p++;
		/* integ data runs from just after the seq. number word to
		 * the end of the reply; it must be quad-aligned already. */
		integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
		integ_len = resbuf->len - integ_offset;
		BUG_ON(integ_len % 4);
		*p++ = htonl(integ_len);
		*p++ = htonl(gc->gc_seq);
		if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
					integ_len))
			BUG();
		/* Choose where the MIC goes: the head if the whole reply
		 * lives there with room to spare, otherwise the tail
		 * (allocating a tail page if none exists yet). */
		if (resbuf->page_len == 0
			&& resbuf->tail[0].iov_len + RPC_MAX_AUTH_SIZE
			< PAGE_SIZE) {
			BUG_ON(resbuf->tail[0].iov_len);
			/* Use head for everything */
			resv = &resbuf->head[0];
		} else if (resbuf->tail[0].iov_base == NULL) {
			/* copied from nfsd4_encode_read */
			svc_take_page(rqstp);
			resbuf->tail[0].iov_base = page_address(rqstp
					->rq_respages[rqstp->rq_resused-1]);
			rqstp->rq_restailpage = rqstp->rq_resused-1;
			resbuf->tail[0].iov_len = 0;
			resv = &resbuf->tail[0];
		} else {
			resv = &resbuf->tail[0];
		}
		/* Compute the MIC directly into place, leaving 4 bytes for
		 * its length word, then zero-pad to a quad boundary. */
		mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
		if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
			goto out_err;
		svc_putu32(resv, htonl(mic.len));
		memset(mic.data + mic.len, 0,
				round_up_to_quad(mic.len) - mic.len);
		resv->iov_len += XDR_QUADLEN(mic.len) << 2;
		/* not strictly required: */
		resbuf->len += XDR_QUADLEN(mic.len) << 2;
		BUG_ON(resv->iov_len > PAGE_SIZE);
		break;
	case RPC_GSS_SVC_PRIVACY:
		/* privacy unsupported here; fall through to error */
	default:
		goto out_err;
	}
out:
	stat = 0;
out_err:
	/* Unconditional cleanup: drop the references the accept path took
	 * for this request, whether or not wrapping succeeded. */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;
	if (gsd->rsci)
		rsc_put(&gsd->rsci->h, &rsc_cache);
	gsd->rsci = NULL;
	return stat;
}
  1010. static void
  1011. svcauth_gss_domain_release(struct auth_domain *dom)
  1012. {
  1013. struct gss_domain *gd = container_of(dom, struct gss_domain, h);
  1014. kfree(dom->name);
  1015. kfree(gd);
  1016. }
/*
 * Server-side auth_ops for flavour RPC_AUTH_GSS: hooks the GSS accept,
 * reply-release, domain-release and set_client paths into the generic
 * SUNRPC server authentication machinery.
 */
static struct auth_ops svcauthops_gss = {
	.name		= "rpcsec_gss",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_GSS,
	.accept		= svcauth_gss_accept,
	.release	= svcauth_gss_release,
	.domain_release = svcauth_gss_domain_release,
	.set_client	= svcauth_gss_set_client,
};
  1026. int
  1027. gss_svc_init(void)
  1028. {
  1029. int rv = svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
  1030. if (rv == 0) {
  1031. cache_register(&rsc_cache);
  1032. cache_register(&rsi_cache);
  1033. }
  1034. return rv;
  1035. }
  1036. void
  1037. gss_svc_shutdown(void)
  1038. {
  1039. if (cache_unregister(&rsc_cache))
  1040. printk(KERN_ERR "auth_rpcgss: failed to unregister rsc cache\n");
  1041. if (cache_unregister(&rsi_cache))
  1042. printk(KERN_ERR "auth_rpcgss: failed to unregister rsi cache\n");
  1043. svc_auth_unregister(RPC_AUTH_GSS);
  1044. }