author    Herbert Xu <herbert@gondor.apana.org.au>  2007-10-04 14:49:00 +0800
committer David S. Miller <davem@sunset.davemloft.net>  2007-10-10 16:55:48 -0700
commit    2614de1b9af5a9e49cda64b394e1348159565bd5 (patch)
tree      00a104eaffacee8d40ba809fe2845df3193e71f5 /crypto
parent    d8058480b35dbc3d1e6085b3f13b80af27def09e (diff)
download  linux-next-2614de1b9af5a9e49cda64b394e1348159565bd5.tar.gz
[CRYPTO] blkcipher: Increase kmalloc amount to aligned block size
Now that the block size is no longer a multiple of the alignment, we need to increase the kmalloc amount in blkcipher_next_slow to use the aligned block size.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/blkcipher.c  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index a3c87da23f1e..3d05586a8f34 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -158,7 +158,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
 	if (walk->buffer)
 		goto ok;
 
-	n = bsize * 3 - (alignmask + 1) +
+	n = aligned_bsize * 3 - (alignmask + 1) +
 	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
 	walk->buffer = kmalloc(n, GFP_ATOMIC);
 	if (!walk->buffer)
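The point of the patch is that the old formula sized the bounce buffer from the raw block size, which under-allocates once the block size is not already a multiple of the alignment. Below is a minimal userspace sketch (not kernel code) of the arithmetic, assuming aligned_bsize is the block size rounded up to alignmask + 1 (as done elsewhere in blkcipher_next_slow); the sample values and the ctx_align stand-in for crypto_tfm_ctx_alignment() are hypothetical.

```c
#include <stdio.h>

/* Round x up to a multiple of a; a must be a power of two. */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int bsize = 12;      /* hypothetical block size */
	unsigned int alignmask = 15;  /* hypothetical 16-byte alignment mask */
	unsigned int ctx_align = 8;   /* stand-in for crypto_tfm_ctx_alignment() */

	/* Assumption: aligned_bsize = ALIGN(bsize, alignmask + 1) */
	unsigned int aligned_bsize = align_up(bsize, alignmask + 1);

	/* Same shape as the patched line: room for three aligned blocks,
	 * minus the worst-case pointer adjustment, plus slack for the
	 * ctx-alignment of the kmalloc'ed start. */
	unsigned int n_old = bsize * 3 - (alignmask + 1) +
			     (alignmask & ~(ctx_align - 1));
	unsigned int n_new = aligned_bsize * 3 - (alignmask + 1) +
			     (alignmask & ~(ctx_align - 1));

	printf("bsize=%u aligned_bsize=%u old n=%u new n=%u\n",
	       bsize, aligned_bsize, n_old, n_new);
	return 0;
}
```

With these sample numbers the old formula yields 28 bytes while the aligned one yields 40, illustrating how sizing from bsize alone can leave too little room for three alignment-padded blocks.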