@@ -1,27 +1,118 @@
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
-#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/in6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/udp.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(crypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, crypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ?
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				crypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm_output_resume(skb, err);
+}
+
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
+	void *tmp;
+	u8 *iv;
 	u8 *tail;
 	int blksize;
 	int clen;
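
The helpers above carve a single kmalloc()'d buffer into three regions: the
IV first (so it can take the cipher's alignment), then the givcrypt request
plus the transform's private request context, then the scatterlist array.
Below is a standalone sketch of the same offset arithmetic, with made-up
sizes standing in for crypto_aead_ivsize(), crypto_aead_alignmask(),
crypto_tfm_ctx_alignment() and crypto_aead_reqsize(); it is illustration,
not kernel code.

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		/* Assumed example values for one AEAD transform. */
		unsigned long ivsize = 8, alignmask = 15, ctx_align = 8;
		unsigned long reqsize = 128, nfrags = 4, sg_size = 16;
		unsigned long len, req_off, sg_off;

		len = ivsize;
		len += alignmask & ~(ctx_align - 1);	/* worst-case IV slack */
		len = ALIGN(len, ctx_align);		/* request wants ctx alignment */
		req_off = len;
		len += reqsize;				/* request header + ctx */
		len = ALIGN(len, sizeof(void *));	/* scatterlist alignment */
		sg_off = len;
		len += nfrags * sg_size;

		printf("iv@0 req@%lu sg@%lu total=%lu\n", req_off, sg_off, len);
		return 0;
	}

Packing everything into one GFP_ATOMIC allocation keeps the per-packet cost
to a single kmalloc()/kfree() pair, which is why esp_output_done() only has
to free ESP_SKB_CB(skb)->tmp.
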
@@ -36,18 +127,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	clen = skb->len;
 
 	esp = x->data;
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
+
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+		goto error;
+	nfrags = err;
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
 		goto error;
 
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
+
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
 	do {
@@ -56,28 +156,34 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			tail[i] = i + 1;
 	} while (0);
 	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;
 
-	spin_lock_bh(&x->lock);
-
 	/* this is non-NULL only with UDP Encapsulation */
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
 		struct udphdr *uh;
 		__be32 *udpdata32;
+		unsigned int sport, dport;
+		int encap_type;
+
+		spin_lock_bh(&x->lock);
+		sport = encap->encap_sport;
+		dport = encap->encap_dport;
+		encap_type = encap->encap_type;
+		spin_unlock_bh(&x->lock);
 
 		uh = (struct udphdr *)esph;
-		uh->source = encap->encap_sport;
-		uh->dest = encap->encap_dport;
-		uh->len = htons(skb->len + alen - skb_transport_offset(skb));
+		uh->source = sport;
+		uh->dest = dport;
+		uh->len = htons(skb->len - skb_transport_offset(skb));
 		uh->check = 0;
 
-		switch (encap->encap_type) {
+		switch (encap_type) {
 		default:
 		case UDP_ENCAP_ESPINUDP:
 			esph = (struct ip_esp_hdr *)(uh + 1);
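
The padding written here follows the RFC 4303 ESP trailer layout: pad bytes
counting 1, 2, 3, ..., then a pad-length byte, then a next-header byte,
which is why the protocol value saved from *skb_mac_header(skb) now goes
into the last trailer byte and pskb_put() also reserves alen bytes for the
ICV. A minimal userspace sketch of the same construction (the sizes in
main() are assumed, and blksize must be a power of two as ALIGN() expects):

	#include <stdio.h>

	/* Build an ESP trailer for a payload of `len` bytes, as the
	 * "Fill padding..." loop above does; returns the padded length. */
	static unsigned int esp_trailer(unsigned char *tail, unsigned int len,
					unsigned int blksize,
					unsigned char nexthdr)
	{
		/* room for pad-length + next-header, rounded to a block */
		unsigned int clen = (len + 2 + blksize - 1) & ~(blksize - 1);
		unsigned int padlen = clen - len - 2;
		unsigned int i;

		for (i = 0; i < padlen; i++)
			tail[i] = i + 1;	/* 1, 2, 3, ... */
		tail[padlen] = padlen;		/* pad length */
		tail[padlen + 1] = nexthdr;	/* e.g. IPPROTO_TCP */
		return clen;
	}

	int main(void)
	{
		unsigned char buf[64];
		unsigned int clen = esp_trailer(buf, 29, 16, 6);

		printf("29 -> %u bytes, padlen %u\n", clen, buf[clen - 29 - 2]);
		return 0;
	}
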
@@ -95,131 +201,45 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
 
-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
-
-	if (unlikely(err))
-		goto unlock;
-
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
 
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
-	}
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
 
-unlock:
-	spin_unlock_bh(&x->lock);
+	kfree(tmp);
 
 error:
 	return err;
 }
 
-/*
- * Note: detecting truncated vs. non-truncated authentication data is very
- * expensive, so we only support truncated data, which is the recommended
- * and common case.
- */
-static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_input_done2(struct sk_buff *skb, int err)
 {
 	struct iphdr *iph;
-	struct ip_esp_hdr *esph;
+	struct xfrm_state *x = xfrm_input_state(skb);
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
-	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int nfrags;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
 	int ihl;
 	u8 nexthdr[2];
-	struct scatterlist *sg;
 	int padlen;
-	int err = -EINVAL;
 
-	if (!pskb_may_pull(skb, sizeof(*esph)))
-		goto out;
-
-	if (elen <= 0 || (elen & (blksize-1)))
-		goto out;
-
-	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
-		goto out;
-	nfrags = err;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (err)
-			goto unlock;
-
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
-
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			err = -EBADMSG;
-			goto unlock;
-		}
-	}
-
-	esph = (struct ip_esp_hdr *)skb->data;
-
-	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	sg = &esp->sgbuf[0];
-
-	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-		err = -ENOMEM;
-		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-		if (!sg)
-			goto unlock;
-	}
-	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg,
-		     sizeof(*esph) + esp->conf.ivlen,
-		     elen);
-	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-	if (unlikely(sg != &esp->sgbuf[0]))
-		kfree(sg);
-
-unlock:
-	spin_unlock(&x->lock);
+	kfree(ESP_SKB_CB(skb)->tmp);
 
 	if (unlikely(err))
 		goto out;
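
A note on the control flow above: crypto_aead_givencrypt() may complete
asynchronously. On -EINPROGRESS the request is in flight and
esp_output_done() will free the tmp buffer and call xfrm_output_resume();
on a synchronous return the caller frees tmp itself, and -EBUSY (the
request was registered with flags 0, i.e. without
CRYPTO_TFM_REQ_MAY_BACKLOG) becomes NET_XMIT_DROP. A compilable sketch of
that ownership rule, with a stub standing in for the crypto call:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NET_XMIT_DROP 1	/* value as in <linux/netdevice.h> */

	/* Stand-in for crypto_aead_givencrypt(); change the return value
	 * to -EINPROGRESS or -EBUSY to walk the other branches. */
	static int fake_givencrypt(void) { return 0; }

	/* Mirrors the tail of esp_output(): tmp is freed on synchronous
	 * paths, but on -EINPROGRESS ownership moves to the completion
	 * callback, which frees it instead. */
	static int output_tail(void *tmp)
	{
		int err = fake_givencrypt();

		if (err == -EINPROGRESS)
			return err;	/* callback frees tmp later */

		if (err == -EBUSY)
			err = NET_XMIT_DROP;	/* queue full, no backlog */

		free(tmp);
		return err;
	}

	int main(void)
	{
		printf("err=%d\n", output_tail(malloc(16)));
		return 0;
	}
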
@@ -229,15 +249,11 @@ unlock:
 
 	err = -EINVAL;
 	padlen = nexthdr[0];
-	if (padlen+2 >= elen)
+	if (padlen + 2 + alen >= elen)
 		goto out;
 
 	/* ... check padding bits here. Silly. :-) */
 
-	/* RFC4303: Drop dummy packets without any error */
-	if (nexthdr[1] == IPPROTO_NONE)
-		goto out;
-
 	iph = ip_hdr(skb);
 	ihl = iph->ihl * 4;
 
@@ -279,10 +295,87 @@
 	}
 
 	pskb_trim(skb, skb->len - alen - padlen - 2);
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
+	__skb_pull(skb, hlen);
 	skb_set_transport_header(skb, -ihl);
 
-	return nexthdr[1];
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
+	return err;
+}
+
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm_input_resume(skb, esp_input_done2(skb, err));
+}
+
+/*
+ * Note: detecting truncated vs. non-truncated authentication data is very
+ * expensive, so we only support truncated data, which is the recommended
+ * and common case.
+ */
+static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
+	struct sk_buff *trailer;
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
+	int nfrags;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
+	int err = -EINVAL;
+
+	if (!pskb_may_pull(skb, sizeof(*esph)))
+		goto out;
+
+	if (elen <= 0)
+		goto out;
+
+	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+		goto out;
+	nfrags = err;
+
+	err = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	esph = (struct ip_esp_hdr *)skb->data;
+
+	/* Get ivec. This can be wrong, check against another impls. */
+	iv = esph->enc_data;
+
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
+
+	err = crypto_aead_decrypt(req);
+	if (err == -EINPROGRESS)
+		goto out;
+
+	err = esp_input_done2(skb, err);
 
 out:
 	return err;
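
The receive side mirrors the output path: with the AEAD interface the ICV
comparison happens inside crypto_aead_decrypt() itself and a mismatch
surfaces as -EBADMSG through esp_input_done2(), which is what lets the old
esp_mac_digest()/memcmp() block and the blocksize sanity check go away.
Note that elen therefore still includes the authentication tag. A small
sketch of the packet layout esp_input() assumes, with made-up sizes:

	#include <stdio.h>

	/* On-the-wire ESP layout handed to the AEAD:
	 *
	 *   [ SPI(4) | seq(4) ][ IV ][ payload + trailer ][ ICV ]
	 *   '-- assoc (8 B) --''------ elen bytes, ICV included -'
	 */
	int main(void)
	{
		unsigned int skb_len = 120;		/* assumed packet size */
		unsigned int esp_hdr = 8, ivsize = 8;	/* assumed 64-bit IV */
		unsigned int hlen = esp_hdr + ivsize;

		printf("hlen=%u elen=%u\n", hlen, skb_len - hlen);
		return 0;
	}
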
@@ -291,11 +384,11 @@ out:
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;
 
-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
 
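
A worked example of the arithmetic above, under assumed parameters: 16-byte
cipher blocks, padlen 0, a 24-byte header (8-byte ESP header plus 16-byte
IV) and a 12-byte truncated ICV. rem is kept for the mode-specific
adjustment later in the function, outside this hunk.

	#include <stdio.h>

	int main(void)
	{
		unsigned int mtu = 1500;
		unsigned int align = 16;	/* max(blksize, padlen) */
		unsigned int header_len = 24;	/* ESP header + IV */
		unsigned int authsize = 12;	/* truncated ICV */
		unsigned int rem;

		mtu -= header_len + authsize;	/* 1500 - 36 = 1464 */
		rem = mtu & (align - 1);	/* 1464 % 16 = 8 */
		mtu &= ~(align - 1);		/* round down to 1456 */
		printf("mtu=%u rem=%u\n", mtu, rem);
		return 0;
	}
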
@@ -342,80 +435,98 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	u32 align;
+	unsigned int keylen;
+	int err;
 
 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;
 
-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
 
 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);
 
+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}
 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
 
-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
-	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 	else if (x->props.mode == XFRM_MODE_BEET)
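
The blob handed to crypto_aead_setkey() above uses the authenc key format:
an rtattr-framed crypto_authenc_key_param carrying the big-endian
encryption-key length, followed by the raw authentication key and then the
raw encryption key. A userspace sketch of that layout, with stand-in
definitions replacing <linux/rtnetlink.h> and <crypto/authenc.h> (the key
sizes are assumed):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct rtattr { uint16_t rta_len, rta_type; };
	#define RTA_ALIGN(l)	(((l) + 3U) & ~3U)
	#define RTA_LENGTH(l)	(RTA_ALIGN(sizeof(struct rtattr)) + (l))
	#define RTA_SPACE(l)	RTA_ALIGN(RTA_LENGTH(l))
	#define CRYPTO_AUTHENC_KEYA_PARAM 1

	int main(void)
	{
		unsigned char akey[20] = {0}, ekey[16] = {0};
		unsigned char blob[RTA_SPACE(4) + sizeof(akey) + sizeof(ekey)];
		struct rtattr *rta = (struct rtattr *)blob;
		unsigned char *p = blob + RTA_SPACE(4);
		uint32_t n = sizeof(ekey);

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(4);
		/* enckeylen is stored big-endian, like cpu_to_be32() above */
		blob[4] = n >> 24; blob[5] = n >> 16;
		blob[6] = n >> 8;  blob[7] = n;

		memcpy(p, akey, sizeof(akey));			/* auth key */
		memcpy(p + sizeof(akey), ekey, sizeof(ekey));	/* enc key */

		printf("blob: %zu bytes\n", sizeof(blob));
		return 0;
	}
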
@@ -434,18 +545,14 @@ static int esp_init_state(struct xfrm_state *x)
 			break;
 		}
 	}
-	x->data = esp;
-	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	if (esp->conf.padlen)
-		align = max_t(u32, align, esp->conf.padlen);
-	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);
 
 error:
-	x->data = esp;
-	esp_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp_type =