From a033aed5a84eb93a32929b6862602cb283d39e82 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Tue, 4 Dec 2018 22:20:05 -0800
Subject: [PATCH] crypto: x86/chacha - yield the FPU occasionally

To improve responsiveness, yield the FPU (temporarily re-enabling
preemption) every 4 KiB encrypted/decrypted, rather than keeping
preemption disabled during the entire encryption/decryption operation.

Alternatively we could do this for every skcipher_walk step, but steps
may be small in some cases, and yielding the FPU is expensive on x86.

Suggested-by: Martin Willi
Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 arch/x86/crypto/chacha_glue.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index d19c2908be90..9b1d3fac4943 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -132,6 +132,7 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
 	struct skcipher_walk walk;
+	int next_yield = 4096; /* bytes until next FPU yield */
 	int err;
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
@@ -144,12 +145,21 @@ static int chacha_simd_stream_xor(struct skcipher_request *req,
 	while (walk.nbytes > 0) {
 		unsigned int nbytes = walk.nbytes;
 
-		if (nbytes < walk.total)
+		if (nbytes < walk.total) {
 			nbytes = round_down(nbytes, walk.stride);
+			next_yield -= nbytes;
+		}
 
 		chacha_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes, ctx->nrounds);
 
+		if (next_yield <= 0) {
+			/* temporarily allow preemption */
+			kernel_fpu_end();
+			kernel_fpu_begin();
+			next_yield = 4096;
+		}
+
 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
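
For readers following along, the control flow above reduces to the pattern
sketched below. This is a minimal, hypothetical illustration, not the
patched function: process_chunk() stands in for chacha_dosimd(), and the
fixed 256-byte step stands in for one skcipher_walk step. The point is
that code between kernel_fpu_begin() and kernel_fpu_end() runs with
preemption disabled, so a long-running FPU section has to be broken up to
bound scheduling latency:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/fpu/api.h>

/* Hypothetical SIMD worker; stands in for chacha_dosimd(). */
static void process_chunk(u8 *dst, const u8 *src, unsigned int n);

static void stream_xor_sketch(u8 *dst, const u8 *src, unsigned int len)
{
	int next_yield = 4096;	/* bytes until next FPU yield */

	kernel_fpu_begin();
	while (len > 0) {
		/* Stand-in for one skcipher_walk step. */
		unsigned int n = min(len, 256u);

		process_chunk(dst, src, n);	/* SIMD work; needs the FPU */
		dst += n;
		src += n;
		len -= n;

		next_yield -= n;
		if (next_yield <= 0) {
			/* temporarily allow preemption */
			kernel_fpu_end();
			kernel_fpu_begin();
			next_yield = 4096;
		}
	}
	kernel_fpu_end();
}

Counting down a byte budget, rather than yielding on every walk step, keeps
the FPU state save/restore round trip (which, as the commit message notes,
is expensive on x86) amortized over at least 4 KiB of work.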