diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 56f2a1b0ccf57ef608025f750191cf51f6f08c65..41034745d6a2af2e9938c00dd103fdf5db71320a 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -50,8 +50,8 @@ static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
 				   unsigned int nbytes)
 {
-	return glue_skwalk_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
-				     walk, fpu_enabled, nbytes);
+	return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
+			      walk, fpu_enabled, nbytes);
 }
 
 static inline void cast5_fpu_end(bool fpu_enabled)
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index fab5fa1aed77f1cfe854b568b5443d9f40aa2af7..a78ef99a9981b47e1ccbd2942295a5195801a75d 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -50,9 +50,8 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
 		unsigned int func_bytes;
 		unsigned int i;
 
-		fpu_enabled = glue_skwalk_fpu_begin(bsize,
-						    gctx->fpu_blocks_limit,
-						    &walk, fpu_enabled, nbytes);
+		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+					     &walk, fpu_enabled, nbytes);
 		for (i = 0; i < gctx->num_funcs; i++) {
 			func_bytes = bsize * gctx->funcs[i].num_blocks;
 
@@ -129,9 +128,8 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
 		unsigned int i;
 		u128 last_iv;
 
-		fpu_enabled = glue_skwalk_fpu_begin(bsize,
-						    gctx->fpu_blocks_limit,
-						    &walk, fpu_enabled, nbytes);
+		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+					     &walk, fpu_enabled, nbytes);
 		/* Start of the last block. */
 		src += nbytes / bsize - 1;
 		dst += nbytes / bsize - 1;
@@ -190,9 +188,8 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
 		unsigned int i;
 		le128 ctrblk;
 
-		fpu_enabled = glue_skwalk_fpu_begin(bsize,
-						    gctx->fpu_blocks_limit,
-						    &walk, fpu_enabled, nbytes);
+		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+					     &walk, fpu_enabled, nbytes);
 
 		be128_to_le128(&ctrblk, (be128 *)walk.iv);
 
@@ -291,9 +288,9 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
 		return err;
 
 	/* set minimum length to bsize, for tweak_fn */
-	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
-					    &walk, fpu_enabled,
-					    nbytes < bsize ? bsize : nbytes);
+	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+				     &walk, fpu_enabled,
+				     nbytes < bsize ? bsize : nbytes);
 
 	/* calculate first value of T */
 	tweak_fn(tweak_ctx, walk.iv, walk.iv);
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index b925a6363b3ff83b63e2fcb7cd558dfcb405e009..d1818634ae7ee921c1ef665f3b966a93821990d8 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -44,10 +44,9 @@ struct common_glue_ctx {
 	struct common_glue_func_entry funcs[];
 };
 
-static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
-					 int fpu_blocks_limit,
-					 struct skcipher_walk *walk,
-					 bool fpu_enabled, unsigned int nbytes)
+static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
+				  struct skcipher_walk *walk,
+				  bool fpu_enabled, unsigned int nbytes)
 {
 	if (likely(fpu_blocks_limit < 0))
 		return false;
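
Note on the renamed helper (not part of the patch): glue_fpu_begin() lets the 128-bit glue code keep a single kernel FPU section open across skcipher_walk steps instead of toggling the FPU per block, and it only opens that section once the pending chunk is large enough to amortize the state save/restore cost. The sketch below is a minimal user-space model of that batching policy, assuming the keep-open and threshold behavior that is not visible in these hunks; model_fpu_begin(), fake_kernel_fpu_begin() and fake_kernel_fpu_end() are hypothetical stand-ins, and the real helper additionally takes the walk pointer, which this model drops.

/* fpu_batching_model.c - user-space sketch, not kernel code */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for kernel_fpu_begin()/kernel_fpu_end(). */
static void fake_kernel_fpu_begin(void) { puts("open FPU section"); }
static void fake_kernel_fpu_end(void) { puts("close FPU section"); }

/*
 * Model of the policy: a negative limit means the cipher never wants
 * the FPU; an already-open section stays open; otherwise a section is
 * opened only when the pending chunk covers at least fpu_blocks_limit
 * blocks.
 */
static bool model_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
			    bool fpu_enabled, unsigned int nbytes)
{
	if (fpu_blocks_limit < 0)
		return false;
	if (fpu_enabled)
		return true;	/* section already open; keep batching */
	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
		return false;	/* too little data to pay for save/restore */
	fake_kernel_fpu_begin();
	return true;
}

int main(void)
{
	/* CAST5-like parameters: 8-byte blocks, 16 blocks in parallel. */
	const unsigned int chunks[] = { 64, 256, 32 };	/* walk steps */
	bool fpu_enabled = false;
	unsigned int i;

	for (i = 0; i < 3; i++)
		fpu_enabled = model_fpu_begin(8, 16, fpu_enabled, chunks[i]);

	if (fpu_enabled)
		fake_kernel_fpu_end();	/* single close at end of request */
	return 0;
}

With these numbers the first 64-byte step stays scalar (64 < 8 * 16), the 256-byte step opens the FPU section, and the final 32-byte step reuses it, so one begin/end pair covers the whole request.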