crypto: ablkcipher - fix crash flushing dcache in error path

Like the skcipher_walk and blkcipher_walk cases:

scatterwalk_done() is only meant to be called after a nonzero number of
bytes have been processed, since scatterwalk_pagedone() will flush the
dcache of the *previous* page.  But in the error case of
ablkcipher_walk_done(), e.g. if the input wasn't an integer number of
blocks, scatterwalk_done() was actually called after advancing 0 bytes.
This caused a crash ("BUG: unable to handle kernel paging request")
during '!PageSlab(page)' on architectures like arm and arm64 that define
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, provided that the input was
page-aligned as in that case walk->offset == 0.

Fix it by reorganizing ablkcipher_walk_done() to skip the
scatterwalk_advance() and scatterwalk_done() if an error has occurred.

Reported-by: Liu Chao <liuchao741@huawei.com>
Fixes: bf06099db1 ("crypto: skcipher - Add ablkcipher_walk interfaces")
Cc: <stable@vger.kernel.org> # v2.6.35+
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Eric Biggers 2018-07-23 10:54:58 -07:00 committed by Herbert Xu
parent 0868def3e4
commit 318abdfbe7

View file

@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
unsigned int bsize)
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
unsigned int n)
{
unsigned int n = bsize;
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
n -= len_this_page;
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
return bsize;
}
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
unsigned int n)
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
unsigned int nbytes = 0;
unsigned int n; /* bytes processed */
bool more;
if (likely(err >= 0)) {
unsigned int n = walk->nbytes - err;
if (unlikely(err < 0))
goto finish;
if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
n = ablkcipher_done_fast(walk, n);
else if (WARN_ON(err)) {
n = walk->nbytes - err;
walk->total -= n;
more = (walk->total != 0);
if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
ablkcipher_done_fast(walk, n);
} else {
if (WARN_ON(err)) {
/* unexpected case; didn't process all bytes */
err = -EINVAL;
goto err;
} else
n = ablkcipher_done_slow(walk, n);
nbytes = walk->total - n;
err = 0;
goto finish;
}
ablkcipher_done_slow(walk, n);
}
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
scatterwalk_done(&walk->in, 0, more);
scatterwalk_done(&walk->out, 1, more);
err:
walk->total = nbytes;
walk->nbytes = nbytes;
if (nbytes) {
if (more) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
err = 0;
finish:
walk->nbytes = 0;
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
kfree(walk->iv_buffer);
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);