/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		1024
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

struct gss_auth {
	struct kref kref;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct dentry *dentry;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static int pipe_version = -1;
static atomic_t pipe_users = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the inode->i_lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
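
/*
 * Helpers for parsing the gssd downcall buffer: simple_get_bytes() copies a
 * fixed-size value and returns the advanced pointer (or an ERR_PTR on
 * overrun), and simple_get_netobj() reads an unsigned int length followed by
 * that many opaque bytes into a freshly allocated xdr_netobj.
 */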
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);

	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
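
/*
 * gss_fill_context() consumes the remainder of the gssd downcall after the
 * uid has been stripped off in gss_pipe_downcall(). As parsed below (the
 * field order is inferred from this code, not from a separate protocol
 * description), the buffer carries:
 *
 *	unsigned int	credential lifetime in seconds (0 => GSSD_MIN_TIMEOUT)
 *	unsigned int	sequence window size (0 signals an error from gssd)
 *	netobj		the opaque wire context (length + data)
 *	unsigned int	length of the mechanism-specific context blob
 *	opaque		the blob itself, handed to gss_import_sec_context()
 */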
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	u32 window_size;
	int ret;

	/* First unsigned int gives the lifetime (in seconds) of the cred */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
	/* Sequence number window. Determines the maximum number of simultaneous requests */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/* in which case, p points to an error code which we ignore */
		p = ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	return q;
err:
	dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p));
	return p;
}

struct gss_upcall_msg {
	atomic_t count;
	uid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
};
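
/*
 * pipe_users counts live gss_upcall_msg structures plus open instances of
 * the pipe; pipe_version stays at -1 until gss_pipe_open() sees the first
 * opener and drops back to -1 when the count reaches zero.
 * get_pipe_version() therefore fails with -EAGAIN while no gssd has the
 * pipe open, which lets the callers below wait and warn instead of queueing
 * upcalls that nobody will read.
 */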
static int get_pipe_version(void)
{
	int ret;

	spin_lock(&pipe_version_lock);
	if (pipe_version >= 0) {
		atomic_inc(&pipe_users);
		ret = 0;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(void)
{
	if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
		pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version();
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}
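
/*
 * Upcalls are kept on the per-pipe rpc_inode list, keyed by uid. Lookup and
 * insertion both run under inode->i_lock, so at most one upcall per uid is
 * ever queued to gssd; later requests for the same uid just take a reference
 * to the message that is already in flight.
 */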
static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
{
	struct gss_upcall_msg *pos;

	list_for_each_entry(pos, &rpci->in_downcall, list) {
		if (pos->uid != uid)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: gss_find_upcall found msg %p\n", pos);
		return pos;
	}
	dprintk("RPC: gss_find_upcall found nothing\n");
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
	struct inode *inode = gss_auth->dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *old;

	spin_lock(&inode->i_lock);
	old = __gss_find_upcall(rpci, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &rpci->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&inode->i_lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct gss_auth *gss_auth = gss_msg->auth;
	struct inode *inode = gss_auth->dentry->d_inode;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
}
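
/*
 * gss_upcall_callback() is the rpc_wait_queue callback used by
 * gss_refresh_upcall(): it runs once the downcall (or an error) has arrived,
 * installs the new context on the waiting credential (or propagates the
 * error to the task), and drops the reference on the upcall message that
 * gss_refresh_upcall() took.
 */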
static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct inode *inode = gss_msg->auth->dentry->d_inode;

	spin_lock(&inode->i_lock);
	if (gss_msg->ctx)
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
	else
		task->tk_status = gss_msg->msg.errno;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
}

static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version();
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->msg.data = &gss_msg->uid;
	gss_msg->msg.len = sizeof(gss_msg->uid);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	return gss_msg;
}
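
/*
 * gss_setup_upcall() allocates an upcall message for the credential's uid
 * (uid 0 for machine creds, which is what rpc.gssd expects), hashes it on
 * the pipe, and queues it to gssd unless an identical upcall is already
 * pending. Returns the message to wait on, or an ERR_PTR.
 */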
  342. static struct gss_upcall_msg *
  343. gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
  344. {
  345. struct gss_cred *gss_cred = container_of(cred,
  346. struct gss_cred, gc_base);
  347. struct gss_upcall_msg *gss_new, *gss_msg;
  348. uid_t uid = cred->cr_uid;
  349. /* Special case: rpc.gssd assumes that uid == 0 implies machine creds */
  350. if (gss_cred->gc_machine_cred != 0)
  351. uid = 0;
  352. gss_new = gss_alloc_msg(gss_auth, uid);
  353. if (IS_ERR(gss_new))
  354. return gss_new;
  355. gss_msg = gss_add_msg(gss_auth, gss_new);
  356. if (gss_msg == gss_new) {
  357. int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
  358. if (res) {
  359. gss_unhash_msg(gss_new);
  360. gss_msg = ERR_PTR(res);
  361. }
  362. } else
  363. gss_release_msg(gss_new);
  364. return gss_msg;
  365. }
  366. static void warn_gssd(void)
  367. {
  368. static unsigned long ratelimit;
  369. unsigned long now = jiffies;
  370. if (time_after(now, ratelimit)) {
  371. printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
  372. "Please check user daemon is running.\n");
  373. ratelimit = now + 15*HZ;
  374. }
  375. }
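
/*
 * Asynchronous refresh path, called from gss_refresh() while an RPC task is
 * running: if gssd has not opened the pipe yet the task is put to sleep on
 * pipe_version_rpc_waitqueue for up to 15 seconds; otherwise the task sleeps
 * on the upcall's wait queue and gss_upcall_callback() finishes the job when
 * the downcall arrives.
 */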
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = gss_auth->dentry->d_inode;
	int err = 0;

	dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
			cred->cr_uid);
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return 0;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	spin_lock(&inode->i_lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx != NULL) {
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
		gss_cred->gc_upcall = NULL;
		rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	} else if (gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
			task->tk_pid, cred->cr_uid, err);
	return err;
}
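
/*
 * Synchronous variant used when a credential is first initialized (see
 * gss_cred_init()): the caller sleeps on the upcall's waitqueue until gssd
 * answers, the pipe reports an error, or a signal arrives. -EAGAIN from
 * gss_setup_upcall() means gssd has not opened the pipe yet, so we wait up
 * to 15 seconds for it before warning and retrying.
 */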
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct inode *inode = gss_auth->dentry->d_inode;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
retry:
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				pipe_version >= 0, 15*HZ);
		if (err)
			goto out;
		if (pipe_version < 0)
			warn_gssd();
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&inode->i_lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&inode->i_lock);
		if (signalled()) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: gss_create_upcall for uid %u result %d\n",
			cred->cr_uid, err);
	return err;
}

static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
		char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	size_t mlen = min(msg->len, buflen);
	unsigned long left;

	left = copy_to_user(dst, data, mlen);
	if (left == mlen) {
		msg->errno = -EFAULT;
		return -EFAULT;
	}
	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}

#define MSG_BUF_MAXSIZE 1024
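
/*
 * Downcall handler: gssd writes back the uid identifying the upcall it is
 * answering, followed by the context description parsed by
 * gss_fill_context(). The write is bounced into a kernel buffer (at most
 * MSG_BUF_MAXSIZE bytes), matched against a pending upcall for that uid,
 * and the resulting context is attached to the upcall message so the
 * waiters can pick it up.
 */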
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gss_cl_ctx *ctx;
	uid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &uid, sizeof(uid));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&inode->i_lock);
	gss_msg = __gss_find_upcall(RPC_I(inode), uid);
	if (gss_msg == NULL) {
		spin_unlock(&inode->i_lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&inode->i_lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&inode->i_lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: gss_pipe_downcall returning %Zd\n", err);
	return err;
}
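
/*
 * Pipe open/release callbacks. The first open flips pipe_version to 0 and
 * wakes everyone waiting for gssd to appear; release fails any upcalls still
 * queued on this pipe with -EPIPE and drops the pipe's reference on
 * pipe_users.
 */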
static int
gss_pipe_open(struct inode *inode)
{
	spin_lock(&pipe_version_lock);
	if (pipe_version < 0) {
		pipe_version = 0;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	}
	atomic_inc(&pipe_users);
	spin_unlock(&pipe_version_lock);
	return 0;
}

static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *gss_msg;

	spin_lock(&inode->i_lock);
	while (!list_empty(&rpci->in_downcall)) {
		gss_msg = list_entry(rpci->in_downcall.next,
				struct gss_upcall_msg, list);
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&inode->i_lock);
		gss_release_msg(gss_msg);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);

	put_pipe_version();
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
				gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
				__func__, flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
			clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->dentry)) {
		err = PTR_ERR(gss_auth->dentry);
		goto err_put_mech;
	}

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipe;

	return auth;
err_unlink_pipe:
	rpc_unlink(gss_auth->dentry);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
	rpc_unlink(gss_auth->dentry);
	gss_mech_put(gss_auth->mech);
	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
			auth, auth->au_flavor);

	rpcauth_destroy_credcache(auth);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
			test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: gss_free_ctx\n");

	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);

	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	struct gss_ctx *gc_gss_ctx;

	gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
	rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
	if (gc_gss_ctx)
		gss_delete_sec_context(&gc_gss_ctx);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: gss_free_cred %p\n", gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);

	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	rcu_assign_pointer(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{
	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
			acred->uid, auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_machine_cred = acred->machine_cred;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: gss_create_cred failed with error %d\n", err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->machine_cred != gss_cred->gc_machine_cred)
		return 0;
	return (rc->cr_uid == acred->uid);
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	struct rpc_rqst *req = task->tk_rqstp;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
			req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor */
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}
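
/*
 * When an existing credential has lost RPCAUTH_CRED_UPTODATE (typically
 * because the server reported GSS_S_CONTEXT_EXPIRED), gss_refresh() first
 * swaps in a fresh credential via gss_renew_cred() and then lets the normal
 * upcall path re-establish a context for it.
 */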
static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
			struct gss_cred,
			gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.machine_cred = gss_cred->gc_machine_cred,
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_msg.rpc_cred = new;
	put_rpccred(oldcred);
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	int ret = 0;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_msg.rpc_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}
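
/*
 * gss_validate() checks the verifier in the server's reply. The verifier as
 * read below is the flavor word (must be RPC_AUTH_GSS), an opaque length (at
 * most RPC_MAX_AUTH_SIZE), and a MIC computed over the XDR-encoded sequence
 * number of the request. A GSS_S_CONTEXT_EXPIRED result clears
 * RPCAUTH_CRED_UPTODATE so that a later call will refresh the credential.
 */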
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav, len;
	u32 maj_stat;

	dprintk("RPC: %5u gss_validate\n", task->tk_pid);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u gss_validate: gss_verify_mic returned "
				"error 0x%08x\n", task->tk_pid, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
			task->tk_pid);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
	return NULL;
}
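
/*
 * Request wrapping for rpc_gss_svc_integrity: the argument data is left in
 * the clear but is preceded by its length and the sequence number, and
 * followed by a MIC over that region, i.e. roughly
 *
 *	integ_len | seqno | <encoded arguments> | MIC(seqno + arguments)
 *
 * The MIC is appended to whichever kvec currently ends the send buffer
 * (head or tail).
 */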
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;

	return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	for (i--; i >= 0; i--) {
		__free_page(rqstp->rq_enc_pages[i]);
	}
out:
	return -EAGAIN;
}
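
/*
 * Request wrapping for rpc_gss_svc_privacy: the sequence number and encoded
 * arguments are handed to gss_wrap() for wrapping in place, so the page
 * vector of the send buffer is first swapped for freshly allocated pages
 * (alloc_enc_pages() above) and the tail is given its own page in case the
 * mechanism needs extra room in the head. On the wire the result is roughly
 *
 *	opaque_len | wrap(seqno + arguments) | padding to a 4-byte boundary
 */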
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/* Give the tail its own page, in case we need extra space in the
	 * head when wrapping: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* RPC_SLACK_SPACE should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
		kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		status = encode(rqstp, p, obj);
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		status = encode(rqstp, p, obj);
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode,
				rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode,
				rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
	return status;
}
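
/*
 * Reply unwrapping mirrors gss_wrap_req(): for the integrity service the
 * MIC over the sequence number and response body is verified, for the
 * privacy service the body is unwrapped in place and the padding stripped.
 * Either way the sequence number embedded in the reply must match rq_seqno,
 * and -EIO is returned on any mismatch or verification failure.
 */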
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}

static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
			+ (savedlen - head->iov_len);
out_decode:
	status = decode(rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
			status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
	.au_name	= "RPCSEC_GSS",
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred
};

static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_credops gss_nullops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_nullcred,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh_null,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static struct rpc_pipe_ops gss_upcall_ops = {
	.upcall		= gss_pipe_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open,
	.release_pipe	= gss_pipe_release,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
}

MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)