@@ -832,6 +832,81 @@ out:
 	return stat;
 }
 
+static inline int
+total_buf_len(struct xdr_buf *buf)
+{
+	return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
+}
+
+static void
+fix_priv_head(struct xdr_buf *buf, int pad)
+{
+	if (buf->page_len == 0) {
+		/* We need to adjust head and buf->len in tandem in this
+		 * case to make svc_defer() work--it finds the original
+		 * buffer start using buf->len - buf->head[0].iov_len. */
+		buf->head[0].iov_len -= pad;
+	}
+}
+
+static int
+unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+{
+	u32 priv_len, maj_stat;
+	int pad, saved_len, remaining_len, offset;
+
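+	/* The reply will be encrypted in place, so the response pages
+	 * can't come straight from the page cache (no sendfile): */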
+	rqstp->rq_sendfile_ok = 0;
+
+	priv_len = ntohl(svc_getu32(&buf->head[0]));
+	if (rqstp->rq_deferred) {
+		/* Already decrypted last time through! The sequence number
+		 * check at out_seq is unnecessary but harmless: */
+		goto out_seq;
+	}
+	/* buf->len is the number of bytes from the original start of the
+	 * request to the end, where head[0].iov_len is just the bytes
+	 * not yet read from the head, so these two values are different: */
+	remaining_len = total_buf_len(buf);
+	if (priv_len > remaining_len)
+		return -EINVAL;
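+	/* Trim anything that follows the priv_len bytes of wrapped
+	 * data before handing the buffer to gss_unwrap: */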
+	pad = remaining_len - priv_len;
+	buf->len -= pad;
+	fix_priv_head(buf, pad);
+
+	/* Maybe it would be better to give gss_unwrap a length parameter: */
+	saved_len = buf->len;
+	buf->len = priv_len;
+	maj_stat = gss_unwrap(ctx, 0, buf);
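+	/* gss_unwrap decrypts in place and shrinks buf->len to the
+	 * plaintext length; the difference is the token overhead,
+	 * which we also subtract from the saved full-request length: */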
+	pad = priv_len - buf->len;
+	buf->len = saved_len;
+	buf->len -= pad;
+	/* The upper layers assume the buffer is aligned on 4-byte boundaries.
+	 * In the krb5p case, at least, the data ends up offset, so we need to
+	 * move it around. */
+	/* XXX: This is very inefficient. It would be better to either do
+	 * this while we encrypt, or maybe in the receive code, if we can peek
+	 * ahead and work out the service and mechanism there. */
+	offset = buf->head[0].iov_len % 4;
+	if (offset) {
+		buf->buflen = RPCSVC_MAXPAYLOAD;
+		xdr_shift_buf(buf, offset);
+		fix_priv_head(buf, pad);
+	}
+	if (maj_stat != GSS_S_COMPLETE)
+		return -EINVAL;
+out_seq:
+	if (ntohl(svc_getu32(&buf->head[0])) != seq)
+		return -EINVAL;
+	return 0;
+}
+
 struct gss_svc_data {
 	/* decoded gss client cred: */
 	struct rpc_gss_wire_cred	clcred;
@@ -1047,7 +1122,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 			svc_putu32(resv, 0);
 			break;
 		case RPC_GSS_SVC_PRIVACY:
-			/* currently unsupported */
+			if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
+					gc->gc_seq, rsci->mechctx))
+				goto auth_err;
+			/* placeholders for length and seq. number: */
+			svcdata->body_start = resv->iov_base + resv->iov_len;
+			svc_putu32(resv, 0);
+			svc_putu32(resv, 0);
+			break;
 		default:
 			goto auth_err;
 		}
@@ -1089,9 +1171,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 	gsd->body_start = NULL;
 	/* move accept_stat to right place: */
 	memcpy(p, p + 2, 4);
-	/* don't wrap in failure case: */
-	/* Note: counting on not getting here if call was not even
-	 * accepted! */
+	/* Don't wrap in failure case: */
+	/* Counting on not getting here if call was not even accepted! */
 	if (*p != rpc_success) {
 		resbuf->head[0].iov_len -= 2 * 4;
 		goto out;
@@ -1138,6 +1219,71 @@ out_err:
 	return stat;
 }
 
+static inline int
+svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
+{
+	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+	struct rpc_gss_wire_cred *gc = &gsd->clcred;
+	struct xdr_buf *resbuf = &rqstp->rq_res;
+	struct page **inpages = NULL;
+	u32 *p;
+	int offset, *len;
+	int pad;
+
+	p = gsd->body_start;
+	gsd->body_start = NULL;
+	/* move accept_stat to right place: */
+	memcpy(p, p + 2, 4);
+	/* Don't wrap in failure case: */
+	/* Counting on not getting here if call was not even accepted! */
+	if (*p != rpc_success) {
+		resbuf->head[0].iov_len -= 2 * 4;
+		return 0;
+	}
+	p++;
+	len = p++;
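+	/* Everything from offset on--the sequence number and the reply
+	 * body--is what gss_wrap below will encrypt: */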
+	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
+	*p++ = htonl(gc->gc_seq);
+	inpages = resbuf->pages;
+	/* XXX: Would be better to write some xdr helper functions for
+	 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+	if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
+		BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+							+ PAGE_SIZE);
+		BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
+		if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
+				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			return -ENOMEM;
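+		/* Move the existing tail data out of the way to leave
+		 * gss_wrap up to RPC_MAX_AUTH_SIZE of room to expand: */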
+		memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
+			resbuf->tail[0].iov_base,
+			resbuf->tail[0].iov_len);
+		resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
+	}
+	if (resbuf->tail[0].iov_base == NULL) {
+		if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			return -ENOMEM;
+		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
+		resbuf->tail[0].iov_len = 0;
+		rqstp->rq_restailpage = 0;
+	}
+	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
+		return -ENOMEM;
+	*len = htonl(resbuf->len - offset);
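+	/* Round the wrapped databody up to a 4-byte XDR boundary;
+	 * 3 - ((n - 1) & 3) equals (4 - n % 4) % 4: */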
+	pad = 3 - ((resbuf->len - offset - 1)&3);
+	p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
+	memset(p, 0, pad);
+	resbuf->tail[0].iov_len += pad;
+	resbuf->len += pad;
+	return 0;
+}
+
 static int
 svcauth_gss_release(struct svc_rqst *rqstp)
 {
@@ -1152,15 +1298,22 @@ svcauth_gss_release(struct svc_rqst *rqstp)
 	if (gsd->body_start == NULL)
 		goto out;
 	/* normally not set till svc_send, but we need it here: */
-	resbuf->len = resbuf->head[0].iov_len
-		+ resbuf->page_len + resbuf->tail[0].iov_len;
+	/* XXX: what for? Do we mess it up the moment we call svc_putu32
+	 * or whatever? */
+	resbuf->len = total_buf_len(resbuf);
 	switch (gc->gc_svc) {
 	case RPC_GSS_SVC_NONE:
 		break;
 	case RPC_GSS_SVC_INTEGRITY:
-		svcauth_gss_wrap_resp_integ(rqstp);
+		stat = svcauth_gss_wrap_resp_integ(rqstp);
+		if (stat)
+			goto out_err;
 		break;
 	case RPC_GSS_SVC_PRIVACY:
+		stat = svcauth_gss_wrap_resp_priv(rqstp);
+		if (stat)
+			goto out_err;
+		break;
 	default:
 		goto out_err;
 	}