staging: lustre: remove almost all crypto layer wrappers

Almost all of these wrappers are just straight function-name renames, so
fix up the callers to call the crypto layer directly; there is no need
for a #define to hide things.

Cc: Peng Tao <tao.peng@emc.com>
Cc: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Author: Greg Kroah-Hartman
Commit: 2b53313a6d
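
For illustration, a call site converted by this patch ends up using the
legacy synchronous hash API directly. A minimal sketch of a one-shot
HMAC over a flat buffer (the algorithm string and all names here are
examples, not code taken from the patch; it needs <linux/crypto.h>,
<linux/scatterlist.h> and <linux/err.h>):

	/* Example only: one-shot HMAC with the old crypto_hash API. */
	static int example_hmac_sha1(const u8 *key, unsigned int keylen,
				     const void *buf, unsigned int len,
				     u8 *out)	/* 20 bytes for SHA-1 */
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int rc;

		tfm = crypto_alloc_hash("hmac(sha1)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;
		rc = crypto_hash_setkey(tfm, key, keylen);
		if (!rc) {
			sg_init_one(&sg, buf, len);
			rc = crypto_hash_digest(&desc, &sg, len, out);
		}
		crypto_free_hash(tfm);
		return rc;
	}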

drivers/staging/lustre/lustre/include/linux/lustre_compat25.h | +2 -58

@@ -100,74 +100,18 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
 #endif
 
 
-
 /* add a lustre compatible layer for crypto API */
 #include <linux/crypto.h>
-#define ll_crypto_hash	  crypto_hash
-#define ll_crypto_cipher	crypto_blkcipher
-#define ll_crypto_alloc_hash(name, type, mask)  crypto_alloc_hash(name, type, mask)
-#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
-#define ll_crypto_hash_init(desc)	       crypto_hash_init(desc)
-#define ll_crypto_hash_update(desc, sl, bytes)  crypto_hash_update(desc, sl, bytes)
-#define ll_crypto_hash_final(desc, out)	 crypto_hash_final(desc, out)
-#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
-		crypto_blkcipher_setkey(tfm, key, keylen)
-#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
-		crypto_blkcipher_set_iv(tfm, src, len)
-#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
-		crypto_blkcipher_get_iv(tfm, dst, len)
-#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
-		crypto_blkcipher_encrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
-		crypto_blkcipher_decrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
-		crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
-		crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
 
 static inline
-struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *name,
+struct crypto_blkcipher *ll_crypto_alloc_blkcipher(const char *name,
 						   u32 type, u32 mask)
 {
-	struct ll_crypto_cipher *rtn = crypto_alloc_blkcipher(name, type, mask);
+	struct crypto_blkcipher *rtn = crypto_alloc_blkcipher(name, type, mask);
 
 	return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
 }
 
-static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
-				 u8 *key, unsigned int *keylen,
-				 struct scatterlist *sg,
-				 unsigned int size, u8 *result)
-{
-	struct hash_desc desc;
-	int	      rv;
-	desc.tfm   = tfm;
-	desc.flags = 0;
-	rv = crypto_hash_setkey(desc.tfm, key, *keylen);
-	if (rv) {
-		CERROR("failed to hash setkey: %d\n", rv);
-		return rv;
-	}
-	return crypto_hash_digest(&desc, sg, size, result);
-}
-static inline
-unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
-{
-	return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
-}
-static inline
-unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
-{
-	return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
-}
-
-#define ll_crypto_hash_blocksize(tfm)       crypto_hash_blocksize(tfm)
-#define ll_crypto_hash_digestsize(tfm)      crypto_hash_digestsize(tfm)
-#define ll_crypto_blkcipher_ivsize(tfm)     crypto_blkcipher_ivsize(tfm)
-#define ll_crypto_blkcipher_blocksize(tfm)  crypto_blkcipher_blocksize(tfm)
-#define ll_crypto_free_hash(tfm)	    crypto_free_hash(tfm)
-#define ll_crypto_free_blkcipher(tfm)       crypto_free_blkcipher(tfm)
-
 #define ll_vfs_rmdir(dir,entry,mnt)	     vfs_rmdir(dir,entry)
 #define ll_vfs_mkdir(inode,dir,mnt,mode)	vfs_mkdir(inode,dir,mode)
 #define ll_vfs_link(old,mnt,dir,new,mnt1)       vfs_link(old,dir,new)
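
The one wrapper left behind is ll_crypto_alloc_blkcipher(), kept because
it normalizes a NULL return from crypto_alloc_blkcipher() to
ERR_PTR(-ENOMEM), so callers can rely on a single IS_ERR() check. A
hypothetical caller (sketch only; the algorithm string is an example):

	struct crypto_blkcipher *tfm;

	tfm = ll_crypto_alloc_blkcipher("cbc(aes)", 0, 0); /* example alg */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... crypto_blkcipher_setkey() / encrypt / decrypt ... */
	crypto_free_blkcipher(tfm);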

drivers/staging/lustre/lustre/obdclass/capa.c | +35 -11

@@ -47,6 +47,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/crypto.h>
 
 #include <obd_class.h>
 #include <lustre_debug.h>
@@ -76,6 +77,12 @@ EXPORT_SYMBOL(capa_list);
 EXPORT_SYMBOL(capa_lock);
 EXPORT_SYMBOL(capa_count);
 
+static inline
+unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+	return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
 struct hlist_head *init_capa_hash(void)
 {
 	struct hlist_head *hash;
@@ -234,9 +241,26 @@ struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
 }
 EXPORT_SYMBOL(capa_lookup);
 
+static inline int ll_crypto_hmac(struct crypto_hash *tfm,
+				 u8 *key, unsigned int *keylen,
+				 struct scatterlist *sg,
+				 unsigned int size, u8 *result)
+{
+	struct hash_desc desc;
+	int rv;
+	desc.tfm   = tfm;
+	desc.flags = 0;
+	rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+	if (rv) {
+		CERROR("failed to hash setkey: %d\n", rv);
+		return rv;
+	}
+	return crypto_hash_digest(&desc, sg, size, result);
+}
+
 int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 {
-	struct ll_crypto_hash *tfm;
+	struct crypto_hash *tfm;
 	struct capa_hmac_alg  *alg;
 	int keylen;
 	struct scatterlist sl;
@@ -248,7 +272,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 
 	alg = &capa_hmac_algs[capa_alg(capa)];
 
-	tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
+	tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
 	if (!tfm) {
 		CERROR("crypto_alloc_tfm failed, check whether your kernel"
 		       "has crypto support!\n");
@@ -261,7 +285,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 		    (unsigned long)(capa) % PAGE_CACHE_SIZE);
 
 	ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
-	ll_crypto_free_hash(tfm);
+	crypto_free_hash(tfm);
 
 	return 0;
 }
@@ -269,7 +293,7 @@ EXPORT_SYMBOL(capa_hmac);
 
 int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 {
-	struct ll_crypto_cipher *tfm;
+	struct crypto_blkcipher *tfm;
 	struct scatterlist sd;
 	struct scatterlist ss;
 	struct blkcipher_desc desc;
@@ -291,7 +315,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 		GOTO(out, rc = -EINVAL);
 	}
 
-	rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+	rc = crypto_blkcipher_setkey(tfm, key, min);
 	if (rc) {
 		CERROR("failed to setting key for aes\n");
 		GOTO(out, rc);
@@ -305,21 +329,21 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 	desc.tfm   = tfm;
 	desc.info  = NULL;
 	desc.flags = 0;
-	rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+	rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
 	if (rc) {
 		CERROR("failed to encrypt for aes\n");
 		GOTO(out, rc);
 	}
 
 out:
-	ll_crypto_free_blkcipher(tfm);
+	crypto_free_blkcipher(tfm);
 	return rc;
 }
 EXPORT_SYMBOL(capa_encrypt_id);
 
 int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 {
-	struct ll_crypto_cipher *tfm;
+	struct crypto_blkcipher *tfm;
 	struct scatterlist sd;
 	struct scatterlist ss;
 	struct blkcipher_desc desc;
@@ -341,7 +365,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 		GOTO(out, rc = -EINVAL);
 	}
 
-	rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+	rc = crypto_blkcipher_setkey(tfm, key, min);
 	if (rc) {
 		CERROR("failed to setting key for aes\n");
 		GOTO(out, rc);
@@ -356,14 +380,14 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 	desc.tfm   = tfm;
 	desc.info  = NULL;
 	desc.flags = 0;
-	rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+	rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
 	if (rc) {
 		CERROR("failed to decrypt for aes\n");
 		GOTO(out, rc);
 	}
 
 out:
-	ll_crypto_free_blkcipher(tfm);
+	crypto_free_blkcipher(tfm);
 	return rc;
 }
 EXPORT_SYMBOL(capa_decrypt_id);
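
The blkcipher sequence that capa_encrypt_id() and capa_decrypt_id() now
spell out directly is the standard legacy pattern. A condensed sketch
(algorithm string, block size and names are illustrative, not lifted
verbatim from the file):

	/* Example only: encrypt one 16-byte block with the old API. */
	static int example_encrypt_block(void *dst, const void *src,
					 const u8 *key, unsigned int keylen)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sd, ss;
		int rc;

		tfm = crypto_alloc_blkcipher("ecb(aes)", 0, 0); /* example alg */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		rc = crypto_blkcipher_setkey(tfm, key, keylen);
		if (rc)
			goto out;

		sg_init_one(&ss, src, 16);
		sg_init_one(&sd, dst, 16);
		desc.tfm   = tfm;
		desc.info  = NULL;	/* ECB: no IV */
		desc.flags = 0;
		rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
	out:
		crypto_free_blkcipher(tfm);
		return rc;
	}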

drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c | +54 -53

@@ -54,6 +54,7 @@
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/mutex.h>
+#include <linux/crypto.h>
 
 #include <obd.h>
 #include <obd_class.h>
@@ -154,7 +155,7 @@ int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
 		return -1;
 	}
 
-	if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+	if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
 		CERROR("failed to set %s key, len %d\n",
 		       alg_name, kb->kb_key.len);
 		return -1;
@@ -197,7 +198,7 @@ void keyblock_free(struct krb5_keyblock *kb)
 {
 	rawobj_free(&kb->kb_key);
 	if (kb->kb_tfm)
-		ll_crypto_free_blkcipher(kb->kb_tfm);
+		crypto_free_blkcipher(kb->kb_tfm);
 }
 
 static
@@ -529,7 +530,7 @@ void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
 }
 
 static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
 		   int decrypt,
 		   void * iv,
 		   void * in,
@@ -546,27 +547,27 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
 	desc.info = local_iv;
 	desc.flags= 0;
 
-	if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+	if (length % crypto_blkcipher_blocksize(tfm) != 0) {
 		CERROR("output length %d mismatch blocksize %d\n",
-		       length, ll_crypto_blkcipher_blocksize(tfm));
+		       length, crypto_blkcipher_blocksize(tfm));
 		goto out;
 	}
 
-	if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
-		CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+	if (crypto_blkcipher_ivsize(tfm) > 16) {
+		CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}
 
 	if (iv)
-		memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
 	buf_to_sg(&sg, out, length);
 
 	if (decrypt)
-		ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+		ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
 	else
-		ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+		ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
 
 out:
 	return(ret);
@@ -574,7 +575,7 @@ out:
 
 
 static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
 		     rawobj_t *key,
 		     struct krb5_header *khdr,
 		     int msgcnt, rawobj_t *msgs,
@@ -585,17 +586,17 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
 	struct scatterlist sg[1];
 	int		i;
 
-	ll_crypto_hash_setkey(tfm, key->data, key->len);
+	crypto_hash_setkey(tfm, key->data, key->len);
 	desc.tfm  = tfm;
 	desc.flags= 0;
 
-	ll_crypto_hash_init(&desc);
+	crypto_hash_init(&desc);
 
 	for (i = 0; i < msgcnt; i++) {
 		if (msgs[i].len == 0)
 			continue;
 		buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-		ll_crypto_hash_update(&desc, sg, msgs[i].len);
+		crypto_hash_update(&desc, sg, msgs[i].len);
 	}
 
 	for (i = 0; i < iovcnt; i++) {
@@ -604,20 +605,20 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
 
 		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
 			    iovs[i].kiov_offset);
-		ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+		crypto_hash_update(&desc, sg, iovs[i].kiov_len);
 	}
 
 	if (khdr) {
 		buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-		ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+		crypto_hash_update(&desc, sg, sizeof(*khdr));
 	}
 
-	return ll_crypto_hash_final(&desc, cksum->data);
+	return crypto_hash_final(&desc, cksum->data);
 }
 
 
 static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
 		     struct krb5_keyblock *kb,
 		     struct krb5_header *khdr,
 		     int msgcnt, rawobj_t *msgs,
@@ -632,13 +633,13 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
 	desc.tfm  = tfm;
 	desc.flags= 0;
 
-	ll_crypto_hash_init(&desc);
+	crypto_hash_init(&desc);
 
 	for (i = 0; i < msgcnt; i++) {
 		if (msgs[i].len == 0)
 			continue;
 		buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
-		ll_crypto_hash_update(&desc, sg, msgs[i].len);
+		crypto_hash_update(&desc, sg, msgs[i].len);
 	}
 
 	for (i = 0; i < iovcnt; i++) {
@@ -647,15 +648,15 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
 
 		sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
 			    iovs[i].kiov_offset);
-		ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+		crypto_hash_update(&desc, sg, iovs[i].kiov_len);
 	}
 
 	if (khdr) {
 		buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
-		ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+		crypto_hash_update(&desc, sg, sizeof(*khdr));
 	}
 
-	ll_crypto_hash_final(&desc, cksum->data);
+	crypto_hash_final(&desc, cksum->data);
 
 	return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
 			    cksum->data, cksum->len);
@@ -674,7 +675,7 @@ __s32 krb5_make_checksum(__u32 enctype,
 			 rawobj_t *cksum)
 {
 	struct krb5_enctype   *ke = &enctypes[enctype];
-	struct ll_crypto_hash *tfm;
+	struct crypto_hash *tfm;
 	__u32		  code = GSS_S_FAILURE;
 	int		    rc;
 
@@ -683,7 +684,7 @@ __s32 krb5_make_checksum(__u32 enctype,
 		return GSS_S_FAILURE;
 	}
 
-	cksum->len = ll_crypto_hash_digestsize(tfm);
+	cksum->len = crypto_hash_digestsize(tfm);
 	OBD_ALLOC_LARGE(cksum->data, cksum->len);
 	if (!cksum->data) {
 		cksum->len = 0;
@@ -700,7 +701,7 @@ __s32 krb5_make_checksum(__u32 enctype,
 	if (rc == 0)
 		code = GSS_S_COMPLETE;
 out_tfm:
-	ll_crypto_free_hash(tfm);
+	crypto_free_hash(tfm);
 	return code;
 }
 
@@ -878,7 +879,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
 }
 
 static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
 			 int mode_ecb,
 			 int inobj_cnt,
 			 rawobj_t *inobjs,
@@ -904,17 +905,17 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
 
 		if (mode_ecb) {
 			if (enc)
-				rc = ll_crypto_blkcipher_encrypt(
+				rc = crypto_blkcipher_encrypt(
 					&desc, &dst, &src, src.length);
 			else
-				rc = ll_crypto_blkcipher_decrypt(
+				rc = crypto_blkcipher_decrypt(
 					&desc, &dst, &src, src.length);
 		} else {
 			if (enc)
-				rc = ll_crypto_blkcipher_encrypt_iv(
+				rc = crypto_blkcipher_encrypt_iv(
 					&desc, &dst, &src, src.length);
 			else
-				rc = ll_crypto_blkcipher_decrypt_iv(
+				rc = crypto_blkcipher_decrypt_iv(
 					&desc, &dst, &src, src.length);
 		}
 
@@ -935,7 +936,7 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
  * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
  */
 static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
 		      struct krb5_header *khdr,
 		      char *confounder,
 		      struct ptlrpc_bulk_desc *desc,
@@ -950,7 +951,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 	LASSERT(desc->bd_iov_count);
 	LASSERT(desc->bd_enc_iov);
 
-	blocksize = ll_crypto_blkcipher_blocksize(tfm);
+	blocksize = crypto_blkcipher_blocksize(tfm);
 	LASSERT(blocksize > 1);
 	LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -962,7 +963,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 	buf_to_sg(&src, confounder, blocksize);
 	buf_to_sg(&dst, cipher->data, blocksize);
 
-	rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+	rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
 	if (rc) {
 		CERROR("error to encrypt confounder: %d\n", rc);
 		return rc;
@@ -982,7 +983,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 		desc->bd_enc_iov[i].kiov_offset = dst.offset;
 		desc->bd_enc_iov[i].kiov_len = dst.length;
 
-		rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
-						    src.length);
+		rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+						 src.length);
 		if (rc) {
 			CERROR("error to encrypt page: %d\n", rc);
@@ -994,7 +995,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
 	buf_to_sg(&src, khdr, sizeof(*khdr));
 	buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
 
-	rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
-					    &dst, &src, sizeof(*khdr));
+	rc = crypto_blkcipher_encrypt_iv(&ciph_desc,
+					 &dst, &src, sizeof(*khdr));
 	if (rc) {
 		CERROR("error to encrypt krb5 header: %d\n", rc);
@@ -1024,7 +1025,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
  *   should have been done by prep_bulk().
  */
 static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
 		      struct krb5_header *khdr,
 		      struct ptlrpc_bulk_desc *desc,
 		      rawobj_t *cipher,
@@ -1041,7 +1042,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
 	LASSERT(desc->bd_enc_iov);
 	LASSERT(desc->bd_nob_transferred);
 
-	blocksize = ll_crypto_blkcipher_blocksize(tfm);
+	blocksize = crypto_blkcipher_blocksize(tfm);
 	LASSERT(blocksize > 1);
 	LASSERT(cipher->len == blocksize + sizeof(*khdr));
 
@@ -1058,7 +1059,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
 	buf_to_sg(&src, cipher->data, blocksize);
 	buf_to_sg(&dst, plain->data, blocksize);
 
-	rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+	rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
 	if (rc) {
 		CERROR("error to decrypt confounder: %d\n", rc);
 		return rc;
@@ -1101,7 +1102,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
 		if (desc->bd_iov[i].kiov_len % blocksize == 0)
 			sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
 
-		rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
-						    src.length);
+		rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+						 src.length);
 		if (rc) {
 			CERROR("error to decrypt page: %d\n", rc);
@@ -1141,7 +1142,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
 	buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
 	buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
 
-	rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
-					    &dst, &src, sizeof(*khdr));
+	rc = crypto_blkcipher_decrypt_iv(&ciph_desc,
+					 &dst, &src, sizeof(*khdr));
 	if (rc) {
 		CERROR("error to decrypt tail: %d\n", rc);
@@ -1176,7 +1177,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 	LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
 	LASSERT(kctx->kc_keye.kb_tfm == NULL ||
 		ke->ke_conf_size >=
-		ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+		crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
 
 	/*
 	 * final token format:
@@ -1200,7 +1201,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 		blocksize = 1;
 	} else {
 		LASSERT(kctx->kc_keye.kb_tfm);
-		blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+		blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 	}
 	LASSERT(blocksize <= ke->ke_conf_size);
 
@@ -1247,7 +1248,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 
 	if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
 		rawobj_t		 arc4_keye;
-		struct ll_crypto_cipher *arc4_tfm;
+		struct crypto_blkcipher *arc4_tfm;
 
 		if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
 				       NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
@@ -1261,7 +1262,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 			GOTO(arc4_out_key, rc = -EACCES);
 		}
 
-		if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
-					       arc4_keye.len)) {
+		if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+					    arc4_keye.len)) {
 			CERROR("failed to set arc4 key, len %d\n",
 			       arc4_keye.len);
@@ -1271,7 +1272,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 		rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
 					  3, data_desc, &cipher, 1);
 arc4_out_tfm:
-		ll_crypto_free_blkcipher(arc4_tfm);
+		crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
 		rawobj_free(&arc4_keye);
 arc4_out:
@@ -1309,7 +1310,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
 	LASSERT(desc->bd_enc_iov);
 	LASSERT(kctx->kc_keye.kb_tfm);
 
-	blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+	blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
 		LASSERT(desc->bd_enc_iov[i].kiov_page);
@@ -1370,7 +1371,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
 		blocksize = 1;
 	} else {
 		LASSERT(kctx->kc_keye.kb_tfm);
-		blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+		blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 	}
 
 	/*
@@ -1480,7 +1481,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 		blocksize = 1;
 	} else {
 		LASSERT(kctx->kc_keye.kb_tfm);
-		blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+		blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 	}
 
 	/* expected token layout:
@@ -1520,7 +1521,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 
 	if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
 		rawobj_t		 arc4_keye;
-		struct ll_crypto_cipher *arc4_tfm;
+		struct crypto_blkcipher *arc4_tfm;
 
 		cksum.data = token->data + token->len - ke->ke_hash_size;
 		cksum.len = ke->ke_hash_size;
@@ -1537,7 +1538,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 			GOTO(arc4_out_key, rc = -EACCES);
 		}
 
-		if (ll_crypto_blkcipher_setkey(arc4_tfm,
-					 arc4_keye.data, arc4_keye.len)) {
+		if (crypto_blkcipher_setkey(arc4_tfm,
+					    arc4_keye.data, arc4_keye.len)) {
 			CERROR("failed to set arc4 key, len %d\n",
 			       arc4_keye.len);
@@ -1547,7 +1548,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx  *gctx,
 		rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
 					  1, &cipher_in, &plain_out, 0);
 arc4_out_tfm:
-		ll_crypto_free_blkcipher(arc4_tfm);
+		crypto_free_blkcipher(arc4_tfm);
 arc4_out_key:
 		rawobj_free(&arc4_keye);
 arc4_out:
@@ -1646,7 +1647,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
 		LBUG();
 	} else {
 		LASSERT(kctx->kc_keye.kb_tfm);
-		blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+		blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
 	}
 	LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
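
For reference, krb5_digest_hmac() and krb5_digest_norm() above are
plain uses of the legacy incremental hash interface; the skeleton, with
assumed fragment names, is:

	struct hash_desc desc;
	struct scatterlist sg;

	desc.tfm   = tfm;	/* a struct crypto_hash *, already allocated */
	desc.flags = 0;

	crypto_hash_init(&desc);
	sg_init_one(&sg, frag, frag_len);	/* repeat for each fragment */
	crypto_hash_update(&desc, &sg, frag_len);
	crypto_hash_final(&desc, digest);	/* crypto_hash_digestsize(tfm) bytes */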