Diffstat (limited to 'lib/accelerated/x86')
 lib/accelerated/x86/aes-cbc-x86-aesni.c      |   30
 lib/accelerated/x86/aes-cbc-x86-ssse3.c      |   34
 lib/accelerated/x86/aes-ccm-x86-aesni.c      |   62
 lib/accelerated/x86/aes-gcm-aead.h           |   33
 lib/accelerated/x86/aes-gcm-padlock.c        |   43
 lib/accelerated/x86/aes-gcm-x86-aesni.c      |   40
 lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c |   99
 lib/accelerated/x86/aes-gcm-x86-pclmul.c     |   59
 lib/accelerated/x86/aes-gcm-x86-ssse3.c      |   42
 lib/accelerated/x86/aes-padlock.c            |   22
 lib/accelerated/x86/aes-padlock.h            |   30
 lib/accelerated/x86/aes-x86.h                |   70
 lib/accelerated/x86/aes-xts-x86-aesni.c      |   34
 lib/accelerated/x86/hmac-padlock.c           |  144
 lib/accelerated/x86/hmac-x86-ssse3.c         |  108
 lib/accelerated/x86/sha-padlock.c            |  110
 lib/accelerated/x86/sha-padlock.h            |   22
 lib/accelerated/x86/sha-x86-ssse3.c          |   56
 lib/accelerated/x86/sha-x86.h                |   12
 lib/accelerated/x86/x86-common.c             |  739
 lib/accelerated/x86/x86-common.h             |   29
 21 files changed, 790 insertions(+), 1028 deletions(-)
diff --git a/lib/accelerated/x86/aes-cbc-x86-aesni.c b/lib/accelerated/x86/aes-cbc-x86-aesni.c
index 3029701b8d..cf3c81df1a 100644
--- a/lib/accelerated/x86/aes-cbc-x86-aesni.c
+++ b/lib/accelerated/x86/aes-cbc-x86-aesni.c
@@ -40,13 +40,13 @@ struct aes_ctx {
int enc;
};
-static int
-aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
- if (algorithm != GNUTLS_CIPHER_AES_128_CBC
- && algorithm != GNUTLS_CIPHER_AES_192_CBC
- && algorithm != GNUTLS_CIPHER_AES_256_CBC)
+ if (algorithm != GNUTLS_CIPHER_AES_128_CBC &&
+ algorithm != GNUTLS_CIPHER_AES_192_CBC &&
+ algorithm != GNUTLS_CIPHER_AES_256_CBC)
return GNUTLS_E_INVALID_REQUEST;
*_ctx = gnutls_calloc(1, sizeof(struct aes_ctx));
@@ -68,13 +68,11 @@ static int aes_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
CHECK_AES_KEYSIZE(keysize);
if (ctx->enc)
- ret =
- aesni_set_encrypt_key(userkey, keysize * 8,
- ALIGN16(&ctx->expanded_key));
+ ret = aesni_set_encrypt_key(userkey, keysize * 8,
+ ALIGN16(&ctx->expanded_key));
else
- ret =
- aesni_set_decrypt_key(userkey, keysize * 8,
- ALIGN16(&ctx->expanded_key));
+ ret = aesni_set_decrypt_key(userkey, keysize * 8,
+ ALIGN16(&ctx->expanded_key));
if (ret != 0)
return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
@@ -93,9 +91,8 @@ static int aes_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static int
-aes_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_encrypt(void *_ctx, const void *src, size_t src_size, void *dst,
+ size_t dst_size)
{
struct aes_ctx *ctx = _ctx;
@@ -110,9 +107,8 @@ aes_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_decrypt(void *_ctx, const void *src, size_t src_size, void *dst,
+ size_t dst_size)
{
struct aes_ctx *ctx = _ctx;
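Aside: the pattern this file settles on — validate the algorithm in init(), then derive the key-schedule width from the raw byte count in setkey() — can be sketched in isolation. demo_keybits() below is a hypothetical helper for illustration, not part of the patch.

#include <stddef.h>

/* Hypothetical helper mirroring the checks above: valid AES key sizes
 * map to the bit widths passed to the aesni_set_*_key() expanders;
 * anything else is rejected (cf. CHECK_AES_KEYSIZE in aes-x86.h). */
static int demo_keybits(size_t keysize)
{
	if (keysize != 16 && keysize != 24 && keysize != 32)
		return 0; /* invalid AES key length */
	return (int)(keysize * 8); /* 128, 192 or 256 */
}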
diff --git a/lib/accelerated/x86/aes-cbc-x86-ssse3.c b/lib/accelerated/x86/aes-cbc-x86-ssse3.c
index ff5c128745..633006c1ae 100644
--- a/lib/accelerated/x86/aes-cbc-x86-ssse3.c
+++ b/lib/accelerated/x86/aes-cbc-x86-ssse3.c
@@ -40,13 +40,13 @@ struct aes_ctx {
int enc;
};
-static int
-aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
- if (algorithm != GNUTLS_CIPHER_AES_128_CBC
- && algorithm != GNUTLS_CIPHER_AES_192_CBC
- && algorithm != GNUTLS_CIPHER_AES_256_CBC)
+ if (algorithm != GNUTLS_CIPHER_AES_128_CBC &&
+ algorithm != GNUTLS_CIPHER_AES_192_CBC &&
+ algorithm != GNUTLS_CIPHER_AES_256_CBC)
return GNUTLS_E_INVALID_REQUEST;
*_ctx = gnutls_calloc(1, sizeof(struct aes_ctx));
@@ -60,8 +60,8 @@ aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
return 0;
}
-static int
-aes_ssse3_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+static int aes_ssse3_cipher_setkey(void *_ctx, const void *userkey,
+ size_t keysize)
{
struct aes_ctx *ctx = _ctx;
int ret;
@@ -69,13 +69,11 @@ aes_ssse3_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
CHECK_AES_KEYSIZE(keysize);
if (ctx->enc)
- ret =
- vpaes_set_encrypt_key(userkey, keysize * 8,
- ALIGN16(&ctx->expanded_key));
+ ret = vpaes_set_encrypt_key(userkey, keysize * 8,
+ ALIGN16(&ctx->expanded_key));
else
- ret =
- vpaes_set_decrypt_key(userkey, keysize * 8,
- ALIGN16(&ctx->expanded_key));
+ ret = vpaes_set_decrypt_key(userkey, keysize * 8,
+ ALIGN16(&ctx->expanded_key));
if (ret != 0)
return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
@@ -83,9 +81,8 @@ aes_ssse3_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
return 0;
}
-static int
-aes_ssse3_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_ssse3_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct aes_ctx *ctx = _ctx;
@@ -100,9 +97,8 @@ aes_ssse3_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_ssse3_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_ssse3_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct aes_ctx *ctx = _ctx;
diff --git a/lib/accelerated/x86/aes-ccm-x86-aesni.c b/lib/accelerated/x86/aes-ccm-x86-aesni.c
index 2c0d6733e4..98e8028c8f 100644
--- a/lib/accelerated/x86/aes-ccm-x86-aesni.c
+++ b/lib/accelerated/x86/aes-ccm-x86-aesni.c
@@ -30,14 +30,14 @@
#ifdef HAVE_LIBNETTLE
-# include <gnutls/crypto.h>
-# include "errors.h"
-# include <aes-x86.h>
-# include <x86-common.h>
-# include <byteswap.h>
-# include <nettle/ccm.h>
-# include <aes-x86.h>
-# include <fips.h>
+#include <gnutls/crypto.h>
+#include "errors.h"
+#include <aes-x86.h>
+#include <x86-common.h>
+#include <byteswap.h>
+#include <nettle/ccm.h>
+#include <aes-x86.h>
+#include <fips.h>
typedef struct ccm_x86_aes_ctx {
AES_KEY key;
@@ -45,15 +45,15 @@ typedef struct ccm_x86_aes_ctx {
/* CCM mode
*/
-static void x86_aes_encrypt(const void *_ctx,
- size_t length, uint8_t * dst, const uint8_t * src)
+static void x86_aes_encrypt(const void *_ctx, size_t length, uint8_t *dst,
+ const uint8_t *src)
{
AES_KEY *ctx = (void *)_ctx;
aesni_ecb_encrypt(src, dst, length, ctx, 1);
}
-static int
-aes_ccm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **ctx, int enc)
+static int aes_ccm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **ctx,
+ int enc)
{
/* we use key size to distinguish */
if (algorithm != GNUTLS_CIPHER_AES_128_CCM &&
@@ -79,13 +79,11 @@ static int aes_ccm_cipher_setkey(void *_ctx, const void *key, size_t length)
return 0;
}
-static int
-aes_ccm_aead_encrypt(void *_ctx,
- const void *nonce, size_t nonce_size,
- const void *auth, size_t auth_size,
- size_t tag_size,
- const void *plain, size_t plain_size,
- void *encr, size_t encr_size)
+static int aes_ccm_aead_encrypt(void *_ctx, const void *nonce,
+ size_t nonce_size, const void *auth,
+ size_t auth_size, size_t tag_size,
+ const void *plain, size_t plain_size,
+ void *encr, size_t encr_size)
{
struct ccm_x86_aes_ctx *ctx = _ctx;
/* proper AEAD cipher */
@@ -117,20 +115,17 @@ aes_ccm_aead_encrypt(void *_ctx,
break;
}
- ccm_encrypt_message(&ctx->key, x86_aes_encrypt,
- nonce_size, nonce,
- auth_size, auth,
- tag_size, plain_size + tag_size, encr, plain);
+ ccm_encrypt_message(&ctx->key, x86_aes_encrypt, nonce_size, nonce,
+ auth_size, auth, tag_size, plain_size + tag_size,
+ encr, plain);
return 0;
}
-static int
-aes_ccm_aead_decrypt(void *_ctx,
- const void *nonce, size_t nonce_size,
- const void *auth, size_t auth_size,
- size_t tag_size,
- const void *encr, size_t encr_size,
- void *plain, size_t plain_size)
+static int aes_ccm_aead_decrypt(void *_ctx, const void *nonce,
+ size_t nonce_size, const void *auth,
+ size_t auth_size, size_t tag_size,
+ const void *encr, size_t encr_size, void *plain,
+ size_t plain_size)
{
struct ccm_x86_aes_ctx *ctx = _ctx;
int ret;
@@ -165,10 +160,9 @@ aes_ccm_aead_decrypt(void *_ctx,
break;
}
- ret = ccm_decrypt_message(&ctx->key, x86_aes_encrypt,
- nonce_size, nonce,
- auth_size, auth,
- tag_size, encr_size - tag_size, plain, encr);
+ ret = ccm_decrypt_message(&ctx->key, x86_aes_encrypt, nonce_size, nonce,
+ auth_size, auth, tag_size,
+ encr_size - tag_size, plain, encr);
if (unlikely(ret == 0))
return gnutls_assert_val(GNUTLS_E_DECRYPTION_FAILED);
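Aside: the reflowed x86_aes_encrypt() above now matches nettle's cipher callback type verbatim, which is what lets it be handed to ccm_encrypt_message()/ccm_decrypt_message() directly. The typedef below paraphrases nettle-types.h for reference; demo_cipher_func is just an illustrative name.

#include <nettle/nettle-types.h>

/* Paraphrase of nettle_cipher_func from nettle-types.h: encrypt
 * length bytes from src into dst using the opaque cipher ctx. */
typedef void demo_cipher_func(const void *ctx, size_t length, uint8_t *dst,
			      const uint8_t *src);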
diff --git a/lib/accelerated/x86/aes-gcm-aead.h b/lib/accelerated/x86/aes-gcm-aead.h
index 6f9bdb1d36..3041861f5c 100644
--- a/lib/accelerated/x86/aes-gcm-aead.h
+++ b/lib/accelerated/x86/aes-gcm-aead.h
@@ -1,13 +1,10 @@
#ifndef GNUTLS_LIB_ACCELERATED_X86_AES_GCM_AEAD_H
-# define GNUTLS_LIB_ACCELERATED_X86_AES_GCM_AEAD_H
-
-static int
-aes_gcm_aead_encrypt(void *ctx,
- const void *nonce, size_t nonce_size,
- const void *auth, size_t auth_size,
- size_t tag_size,
- const void *plain, size_t plain_size,
- void *encr, size_t encr_size)
+#define GNUTLS_LIB_ACCELERATED_X86_AES_GCM_AEAD_H
+
+static int aes_gcm_aead_encrypt(void *ctx, const void *nonce, size_t nonce_size,
+ const void *auth, size_t auth_size,
+ size_t tag_size, const void *plain,
+ size_t plain_size, void *encr, size_t encr_size)
{
/* proper AEAD cipher */
if (unlikely(encr_size - tag_size < plain_size))
@@ -18,17 +15,15 @@ aes_gcm_aead_encrypt(void *ctx,
aes_gcm_encrypt(ctx, plain, plain_size, encr, encr_size);
- aes_gcm_tag(ctx, ((uint8_t *) encr) + plain_size, tag_size);
+ aes_gcm_tag(ctx, ((uint8_t *)encr) + plain_size, tag_size);
return 0;
}
-static int
-aes_gcm_aead_decrypt(void *ctx,
- const void *nonce, size_t nonce_size,
- const void *auth, size_t auth_size,
- size_t tag_size,
- const void *encr, size_t encr_size,
- void *plain, size_t plain_size)
+static int aes_gcm_aead_decrypt(void *ctx, const void *nonce, size_t nonce_size,
+ const void *auth, size_t auth_size,
+ size_t tag_size, const void *encr,
+ size_t encr_size, void *plain,
+ size_t plain_size)
{
uint8_t tag[MAX_HASH_SIZE];
@@ -46,10 +41,10 @@ aes_gcm_aead_decrypt(void *ctx,
aes_gcm_tag(ctx, tag, tag_size);
- if (gnutls_memcmp(((uint8_t *) encr) + encr_size, tag, tag_size) != 0)
+ if (gnutls_memcmp(((uint8_t *)encr) + encr_size, tag, tag_size) != 0)
return gnutls_assert_val(GNUTLS_E_DECRYPTION_FAILED);
return 0;
}
-#endif /* GNUTLS_LIB_ACCELERATED_X86_AES_GCM_AEAD_H */
+#endif /* GNUTLS_LIB_ACCELERATED_X86_AES_GCM_AEAD_H */
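Aside: the decrypt path above recomputes the tag and compares it with gnutls_memcmp() rather than memcmp(); a constant-time comparison accumulates the difference with no data-dependent early exit, roughly as below (a sketch, not the gnutls_memcmp implementation).

#include <stddef.h>
#include <stdint.h>

/* Minimal constant-time equality sketch: runtime depends only on n,
 * never on where the buffers first differ. */
static int demo_ct_equal(const uint8_t *a, const uint8_t *b, size_t n)
{
	uint8_t diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i]; /* OR-accumulate, no branch */
	return diff == 0;
}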
diff --git a/lib/accelerated/x86/aes-gcm-padlock.c b/lib/accelerated/x86/aes-gcm-padlock.c
index a822762592..3ffaa32fc4 100644
--- a/lib/accelerated/x86/aes-gcm-padlock.c
+++ b/lib/accelerated/x86/aes-gcm-padlock.c
@@ -29,28 +29,27 @@
#ifdef HAVE_LIBNETTLE
-# include <gnutls/crypto.h>
-# include "errors.h"
-# include <aes-x86.h>
-# include <x86-common.h>
-# include <byteswap.h>
-# include <nettle/gcm.h>
-# include <aes-padlock.h>
+#include <gnutls/crypto.h>
+#include "errors.h"
+#include <aes-x86.h>
+#include <x86-common.h>
+#include <byteswap.h>
+#include <nettle/gcm.h>
+#include <aes-padlock.h>
-# define GCM_BLOCK_SIZE 16
+#define GCM_BLOCK_SIZE 16
/* GCM mode
* Actually padlock doesn't include GCM mode. We just use
* the ECB part of padlock and nettle for everything else.
*/
struct gcm_padlock_aes_ctx {
- struct GCM_CTX (struct padlock_ctx) inner;
+ struct GCM_CTX(struct padlock_ctx) inner;
size_t rekey_counter;
};
-static void padlock_aes_encrypt(const void *_ctx,
- size_t length, uint8_t * dst,
- const uint8_t * src)
+static void padlock_aes_encrypt(const void *_ctx, size_t length, uint8_t *dst,
+ const uint8_t *src)
{
struct padlock_ctx *ctx = (void *)_ctx;
struct padlock_cipher_data *pce;
@@ -62,7 +61,7 @@ static void padlock_aes_encrypt(const void *_ctx,
}
static void padlock_aes128_set_encrypt_key(struct padlock_ctx *_ctx,
- const uint8_t * key)
+ const uint8_t *key)
{
struct padlock_ctx *ctx = _ctx;
ctx->enc = 1;
@@ -71,7 +70,7 @@ static void padlock_aes128_set_encrypt_key(struct padlock_ctx *_ctx,
}
static void padlock_aes256_set_encrypt_key(struct padlock_ctx *_ctx,
- const uint8_t * key)
+ const uint8_t *key)
{
struct padlock_ctx *ctx = _ctx;
ctx->enc = 1;
@@ -87,8 +86,8 @@ static void aes_gcm_deinit(void *_ctx)
gnutls_free(ctx);
}
-static int
-aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
if (algorithm != GNUTLS_CIPHER_AES_128_GCM &&
@@ -134,9 +133,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static int
-aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t length)
+static int aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t length)
{
struct gcm_padlock_aes_ctx *ctx = _ctx;
int ret;
@@ -154,9 +152,8 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct gcm_padlock_aes_ctx *ctx = _ctx;
@@ -183,7 +180,7 @@ static void aes_gcm_tag(void *_ctx, void *tag, size_t tagsize)
GCM_DIGEST(&ctx->inner, padlock_aes_encrypt, tagsize, tag);
}
-# include "aes-gcm-aead.h"
+#include "aes-gcm-aead.h"
const gnutls_crypto_cipher_st _gnutls_aes_gcm_padlock = {
.init = aes_gcm_cipher_init,
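Aside: the struct GCM_CTX(struct padlock_ctx) member touched above is nettle's per-cipher GCM container; under the assumptions of nettle/gcm.h it expands roughly to the layout below, so a single allocation carries the block cipher state, the GHASH key tables and the running GCM state, and the GCM_* helper macros pass the embedded cipher plus padlock_aes_encrypt() to the generic code.

#include <nettle/gcm.h>
#include <aes-padlock.h> /* struct padlock_ctx, as used in this file */

/* Rough expansion of struct GCM_CTX(struct padlock_ctx), paraphrased
 * from nettle/gcm.h (not copied from the patch): */
struct demo_gcm_padlock_ctx {
	struct padlock_ctx cipher; /* block cipher state */
	struct gcm_key key;	   /* GHASH key tables */
	struct gcm_ctx gcm;	   /* running IV/counter/digest state */
};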
diff --git a/lib/accelerated/x86/aes-gcm-x86-aesni.c b/lib/accelerated/x86/aes-gcm-x86-aesni.c
index b36a8e452d..ef90b94dae 100644
--- a/lib/accelerated/x86/aes-gcm-x86-aesni.c
+++ b/lib/accelerated/x86/aes-gcm-x86-aesni.c
@@ -30,52 +30,52 @@
#ifdef HAVE_LIBNETTLE
-# include <gnutls/crypto.h>
-# include "errors.h"
-# include <aes-x86.h>
-# include <x86-common.h>
-# include <byteswap.h>
-# include <nettle/gcm.h>
+#include <gnutls/crypto.h>
+#include "errors.h"
+#include <aes-x86.h>
+#include <x86-common.h>
+#include <byteswap.h>
+#include <nettle/gcm.h>
/* GCM mode
* It is used when the CPU doesn't include the PCLMUL instructions.
*/
struct gcm_x86_aes_ctx {
- struct GCM_CTX (AES_KEY) inner;
+ struct GCM_CTX(AES_KEY) inner;
size_t rekey_counter;
};
-static void x86_aes_encrypt(const void *_ctx,
- size_t length, uint8_t * dst, const uint8_t * src)
+static void x86_aes_encrypt(const void *_ctx, size_t length, uint8_t *dst,
+ const uint8_t *src)
{
AES_KEY *ctx = (void *)_ctx;
aesni_ecb_encrypt(src, dst, length, ctx, 1);
}
-static void x86_aes128_set_encrypt_key(void *_ctx, const uint8_t * key)
+static void x86_aes128_set_encrypt_key(void *_ctx, const uint8_t *key)
{
AES_KEY *ctx = _ctx;
aesni_set_encrypt_key(key, 16 * 8, ctx);
}
-static void x86_aes192_set_encrypt_key(void *_ctx, const uint8_t * key)
+static void x86_aes192_set_encrypt_key(void *_ctx, const uint8_t *key)
{
AES_KEY *ctx = _ctx;
aesni_set_encrypt_key(key, 24 * 8, ctx);
}
-static void x86_aes256_set_encrypt_key(void *_ctx, const uint8_t * key)
+static void x86_aes256_set_encrypt_key(void *_ctx, const uint8_t *key)
{
AES_KEY *ctx = _ctx;
aesni_set_encrypt_key(key, 32 * 8, ctx);
}
-static int
-aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
if (algorithm != GNUTLS_CIPHER_AES_128_GCM &&
@@ -125,9 +125,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static int
-aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t length)
+static int aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t length)
{
struct gcm_x86_aes_ctx *ctx = _ctx;
int ret;
@@ -145,9 +144,8 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct gcm_x86_aes_ctx *ctx = _ctx;
@@ -182,7 +180,7 @@ static void aes_gcm_deinit(void *_ctx)
gnutls_free(ctx);
}
-# include "aes-gcm-aead.h"
+#include "aes-gcm-aead.h"
const gnutls_crypto_cipher_st _gnutls_aes_gcm_x86_aesni = {
.init = aes_gcm_cipher_init,
diff --git a/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c b/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c
index c0e6173ce2..fd1689e930 100644
--- a/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c
+++ b/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c
@@ -65,7 +65,7 @@ struct aes_gcm_ctx {
};
void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
-void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t * in,
+void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in,
size_t len);
void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]);
@@ -77,8 +77,8 @@ static void aes_gcm_deinit(void *_ctx)
gnutls_free(ctx);
}
-static int
-aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
if (algorithm != GNUTLS_CIPHER_AES_128_GCM &&
@@ -95,22 +95,21 @@ aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
return 0;
}
-static int
-aes_gcm_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+static int aes_gcm_cipher_setkey(void *_ctx, const void *userkey,
+ size_t keysize)
{
struct aes_gcm_ctx *ctx = _ctx;
int ret;
CHECK_AES_KEYSIZE(keysize);
- ret =
- aesni_set_encrypt_key(userkey, keysize * 8,
- ALIGN16(&ctx->expanded_key));
+ ret = aesni_set_encrypt_key(userkey, keysize * 8,
+ ALIGN16(&ctx->expanded_key));
if (ret != 0)
return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
- aesni_ecb_encrypt(ctx->gcm.H.c, ctx->gcm.H.c,
- GCM_BLOCK_SIZE, ALIGN16(&ctx->expanded_key), 1);
+ aesni_ecb_encrypt(ctx->gcm.H.c, ctx->gcm.H.c, GCM_BLOCK_SIZE,
+ ALIGN16(&ctx->expanded_key), 1);
ctx->gcm.H.u[0] = bswap_64(ctx->gcm.H.u[0]);
ctx->gcm.H.u[1] = bswap_64(ctx->gcm.H.u[1]);
@@ -137,8 +136,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 2] = 0;
ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 1;
- aesni_ecb_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c,
- GCM_BLOCK_SIZE, ALIGN16(&ctx->expanded_key), 1);
+ aesni_ecb_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c, GCM_BLOCK_SIZE,
+ ALIGN16(&ctx->expanded_key), 1);
ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 2;
ctx->finished = 0;
ctx->auth_finished = 0;
@@ -146,8 +145,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static void
-gcm_ghash(struct aes_gcm_ctx *ctx, const uint8_t * src, size_t src_size)
+static void gcm_ghash(struct aes_gcm_ctx *ctx, const uint8_t *src,
+ size_t src_size)
{
size_t rest = src_size % GCM_BLOCK_SIZE;
size_t aligned_size = src_size - rest;
@@ -162,24 +161,21 @@ gcm_ghash(struct aes_gcm_ctx *ctx, const uint8_t * src, size_t src_size)
}
}
-static inline void
-ctr_encrypt_last(struct aes_gcm_ctx *ctx, const uint8_t * src,
- uint8_t * dst, size_t pos, size_t length)
+static inline void ctr_encrypt_last(struct aes_gcm_ctx *ctx, const uint8_t *src,
+ uint8_t *dst, size_t pos, size_t length)
{
uint8_t tmp[GCM_BLOCK_SIZE];
uint8_t out[GCM_BLOCK_SIZE];
memcpy(tmp, &src[pos], length);
- aesni_ctr32_encrypt_blocks(tmp, out, 1,
- ALIGN16(&ctx->expanded_key), ctx->gcm.Yi.c);
+ aesni_ctr32_encrypt_blocks(tmp, out, 1, ALIGN16(&ctx->expanded_key),
+ ctx->gcm.Yi.c);
memcpy(&dst[pos], out, length);
-
}
-static int
-aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t length)
+static int aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t length)
{
struct aes_gcm_ctx *ctx = _ctx;
int blocks = src_size / GCM_BLOCK_SIZE;
@@ -200,8 +196,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
}
if (blocks > 0) {
- aesni_ctr32_encrypt_blocks(src, dst,
- blocks,
+ aesni_ctr32_encrypt_blocks(src, dst, blocks,
ALIGN16(&ctx->expanded_key),
ctx->gcm.Yi.c);
@@ -210,7 +205,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
_gnutls_write_uint32(counter, ctx->gcm.Yi.c + 12);
}
- if (rest > 0) { /* last incomplete block */
+ if (rest > 0) { /* last incomplete block */
ctr_encrypt_last(ctx, src, dst, exp_blocks, rest);
ctx->finished = 1;
}
@@ -221,9 +216,8 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct aes_gcm_ctx *ctx = _ctx;
int blocks = src_size / GCM_BLOCK_SIZE;
@@ -241,8 +235,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
ctx->gcm.len.u[1] += src_size;
if (blocks > 0) {
- aesni_ctr32_encrypt_blocks(src, dst,
- blocks,
+ aesni_ctr32_encrypt_blocks(src, dst, blocks,
ALIGN16(&ctx->expanded_key),
ctx->gcm.Yi.c);
@@ -251,7 +244,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
_gnutls_write_uint32(counter, ctx->gcm.Yi.c + 12);
}
- if (rest > 0) { /* last incomplete block */
+ if (rest > 0) { /* last incomplete block */
ctr_encrypt_last(ctx, src, dst, exp_blocks, rest);
ctx->finished = 1;
}
@@ -297,13 +290,11 @@ static void aes_gcm_tag(void *_ctx, void *tag, size_t tagsize)
#ifdef ASM_X86_64
/* requires AVX */
-static int
-aesni_gcm_aead_encrypt(void *_ctx,
- const void *nonce, size_t nonce_size,
- const void *auth, size_t auth_size,
- size_t tag_size,
- const void *plain, size_t plain_size,
- void *encr, size_t encr_size)
+static int aesni_gcm_aead_encrypt(void *_ctx, const void *nonce,
+ size_t nonce_size, const void *auth,
+ size_t auth_size, size_t tag_size,
+ const void *plain, size_t plain_size,
+ void *encr, size_t encr_size)
{
struct aes_gcm_ctx *ctx = _ctx;
size_t s = 0;
@@ -322,21 +313,19 @@ aesni_gcm_aead_encrypt(void *_ctx,
}
if ((plain_size - s) > 0)
- aes_gcm_encrypt(ctx, ((uint8_t *) plain) + s, plain_size - s,
- ((uint8_t *) encr) + s, encr_size - s);
+ aes_gcm_encrypt(ctx, ((uint8_t *)plain) + s, plain_size - s,
+ ((uint8_t *)encr) + s, encr_size - s);
- aes_gcm_tag(ctx, ((uint8_t *) encr) + plain_size, tag_size);
+ aes_gcm_tag(ctx, ((uint8_t *)encr) + plain_size, tag_size);
return 0;
}
-static int
-aesni_gcm_aead_decrypt(void *_ctx,
- const void *nonce, size_t nonce_size,
- const void *auth, size_t auth_size,
- size_t tag_size,
- const void *encr, size_t encr_size,
- void *plain, size_t plain_size)
+static int aesni_gcm_aead_decrypt(void *_ctx, const void *nonce,
+ size_t nonce_size, const void *auth,
+ size_t auth_size, size_t tag_size,
+ const void *encr, size_t encr_size,
+ void *plain, size_t plain_size)
{
struct aes_gcm_ctx *ctx = _ctx;
uint8_t tag[MAX_HASH_SIZE];
@@ -361,21 +350,21 @@ aesni_gcm_aead_decrypt(void *_ctx,
}
if ((encr_size - s) > 0) {
- aes_gcm_decrypt(ctx, ((uint8_t *) encr) + s, encr_size - s,
- ((uint8_t *) plain) + s, plain_size - s);
+ aes_gcm_decrypt(ctx, ((uint8_t *)encr) + s, encr_size - s,
+ ((uint8_t *)plain) + s, plain_size - s);
}
aes_gcm_tag(ctx, tag, tag_size);
- if (gnutls_memcmp(((uint8_t *) encr) + encr_size, tag, tag_size) != 0)
+ if (gnutls_memcmp(((uint8_t *)encr) + encr_size, tag, tag_size) != 0)
return gnutls_assert_val(GNUTLS_E_DECRYPTION_FAILED);
return 0;
}
#else
-# define aesni_gcm_aead_decrypt aes_gcm_aead_decrypt
-# define aesni_gcm_aead_encrypt aes_gcm_aead_encrypt
-# include "aes-gcm-aead.h"
+#define aesni_gcm_aead_decrypt aes_gcm_aead_decrypt
+#define aesni_gcm_aead_encrypt aes_gcm_aead_encrypt
+#include "aes-gcm-aead.h"
#endif
const gnutls_crypto_cipher_st _gnutls_aes_gcm_pclmul_avx = {
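Aside: ctr_encrypt_last() above handles the trailing partial block by staging it through a full 16-byte block, so the block-oriented CTR kernel never reads or writes past the caller's buffers. A self-contained sketch, with demo_ctr_block() standing in for aesni_ctr32_encrypt_blocks() with the key and counter already bound:

#include <stdint.h>
#include <string.h>

static void demo_ctr_tail(void (*demo_ctr_block)(const uint8_t in[16],
						 uint8_t out[16]),
			  const uint8_t *src, uint8_t *dst, size_t pos,
			  size_t length) /* length < 16 */
{
	uint8_t tmp[16] = { 0 }, out[16];

	memcpy(tmp, src + pos, length); /* stage the short tail */
	demo_ctr_block(tmp, out);	/* one full counter block */
	memcpy(dst + pos, out, length); /* emit only the tail bytes */
}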
diff --git a/lib/accelerated/x86/aes-gcm-x86-pclmul.c b/lib/accelerated/x86/aes-gcm-x86-pclmul.c
index 8c8901eb82..596332c437 100644
--- a/lib/accelerated/x86/aes-gcm-x86-pclmul.c
+++ b/lib/accelerated/x86/aes-gcm-x86-pclmul.c
@@ -64,8 +64,8 @@ struct aes_gcm_ctx {
};
void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
-void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16],
- const uint8_t * inp, size_t len);
+void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
+ size_t len);
void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
static void aes_gcm_deinit(void *_ctx)
@@ -76,8 +76,8 @@ static void aes_gcm_deinit(void *_ctx)
gnutls_free(ctx);
}
-static int
-aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
if (algorithm != GNUTLS_CIPHER_AES_128_GCM &&
@@ -94,22 +94,21 @@ aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
return 0;
}
-static int
-aes_gcm_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+static int aes_gcm_cipher_setkey(void *_ctx, const void *userkey,
+ size_t keysize)
{
struct aes_gcm_ctx *ctx = _ctx;
int ret;
CHECK_AES_KEYSIZE(keysize);
- ret =
- aesni_set_encrypt_key(userkey, keysize * 8,
- ALIGN16(&ctx->expanded_key));
+ ret = aesni_set_encrypt_key(userkey, keysize * 8,
+ ALIGN16(&ctx->expanded_key));
if (ret != 0)
return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
- aesni_ecb_encrypt(ctx->gcm.H.c, ctx->gcm.H.c,
- GCM_BLOCK_SIZE, ALIGN16(&ctx->expanded_key), 1);
+ aesni_ecb_encrypt(ctx->gcm.H.c, ctx->gcm.H.c, GCM_BLOCK_SIZE,
+ ALIGN16(&ctx->expanded_key), 1);
ctx->gcm.H.u[0] = bswap_64(ctx->gcm.H.u[0]);
ctx->gcm.H.u[1] = bswap_64(ctx->gcm.H.u[1]);
@@ -136,8 +135,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 2] = 0;
ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 1;
- aesni_ecb_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c,
- GCM_BLOCK_SIZE, ALIGN16(&ctx->expanded_key), 1);
+ aesni_ecb_encrypt(ctx->gcm.Yi.c, ctx->gcm.EK0.c, GCM_BLOCK_SIZE,
+ ALIGN16(&ctx->expanded_key), 1);
ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 2;
ctx->finished = 0;
ctx->auth_finished = 0;
@@ -145,8 +144,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static void
-gcm_ghash(struct aes_gcm_ctx *ctx, const uint8_t * src, size_t src_size)
+static void gcm_ghash(struct aes_gcm_ctx *ctx, const uint8_t *src,
+ size_t src_size)
{
size_t rest = src_size % GCM_BLOCK_SIZE;
size_t aligned_size = src_size - rest;
@@ -161,24 +160,21 @@ gcm_ghash(struct aes_gcm_ctx *ctx, const uint8_t * src, size_t src_size)
}
}
-static inline void
-ctr_encrypt_last(struct aes_gcm_ctx *ctx, const uint8_t * src,
- uint8_t * dst, size_t pos, size_t length)
+static inline void ctr_encrypt_last(struct aes_gcm_ctx *ctx, const uint8_t *src,
+ uint8_t *dst, size_t pos, size_t length)
{
uint8_t tmp[GCM_BLOCK_SIZE];
uint8_t out[GCM_BLOCK_SIZE];
memcpy(tmp, &src[pos], length);
- aesni_ctr32_encrypt_blocks(tmp, out, 1,
- ALIGN16(&ctx->expanded_key), ctx->gcm.Yi.c);
+ aesni_ctr32_encrypt_blocks(tmp, out, 1, ALIGN16(&ctx->expanded_key),
+ ctx->gcm.Yi.c);
memcpy(&dst[pos], out, length);
-
}
-static int
-aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t length)
+static int aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t length)
{
struct aes_gcm_ctx *ctx = _ctx;
int blocks = src_size / GCM_BLOCK_SIZE;
@@ -199,8 +195,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
}
if (blocks > 0) {
- aesni_ctr32_encrypt_blocks(src, dst,
- blocks,
+ aesni_ctr32_encrypt_blocks(src, dst, blocks,
ALIGN16(&ctx->expanded_key),
ctx->gcm.Yi.c);
@@ -209,7 +204,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
_gnutls_write_uint32(counter, ctx->gcm.Yi.c + 12);
}
- if (rest > 0) { /* last incomplete block */
+ if (rest > 0) { /* last incomplete block */
ctr_encrypt_last(ctx, src, dst, exp_blocks, rest);
ctx->finished = 1;
}
@@ -220,9 +215,8 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct aes_gcm_ctx *ctx = _ctx;
int blocks = src_size / GCM_BLOCK_SIZE;
@@ -240,8 +234,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
ctx->gcm.len.u[1] += src_size;
if (blocks > 0) {
- aesni_ctr32_encrypt_blocks(src, dst,
- blocks,
+ aesni_ctr32_encrypt_blocks(src, dst, blocks,
ALIGN16(&ctx->expanded_key),
ctx->gcm.Yi.c);
@@ -250,7 +243,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
_gnutls_write_uint32(counter, ctx->gcm.Yi.c + 12);
}
- if (rest > 0) { /* last incomplete block */
+ if (rest > 0) { /* last incomplete block */
ctr_encrypt_last(ctx, src, dst, exp_blocks, rest);
ctx->finished = 1;
}
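Aside: around aesni_ctr32_encrypt_blocks(), both pclmul variants maintain the 32-bit big-endian counter in the last four bytes of Yi by hand (the _gnutls_write_uint32() calls above). The arithmetic is sketched below with plain byte operations.

#include <stdint.h>

/* Sketch: advance the big-endian CTR32 field at yi[12..15] by the
 * number of blocks just processed. */
static void demo_advance_ctr32(uint8_t yi[16], uint32_t blocks)
{
	uint32_t c = ((uint32_t)yi[12] << 24) | ((uint32_t)yi[13] << 16) |
		     ((uint32_t)yi[14] << 8) | (uint32_t)yi[15];

	c += blocks; /* wraps mod 2^32, as CTR32 requires */
	yi[12] = c >> 24;
	yi[13] = (c >> 16) & 0xff;
	yi[14] = (c >> 8) & 0xff;
	yi[15] = c & 0xff;
}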
diff --git a/lib/accelerated/x86/aes-gcm-x86-ssse3.c b/lib/accelerated/x86/aes-gcm-x86-ssse3.c
index b3aad36c8a..eee6ee944e 100644
--- a/lib/accelerated/x86/aes-gcm-x86-ssse3.c
+++ b/lib/accelerated/x86/aes-gcm-x86-ssse3.c
@@ -30,24 +30,24 @@
#ifdef HAVE_LIBNETTLE
-# include <gnutls/crypto.h>
-# include "errors.h"
-# include <aes-x86.h>
-# include <x86-common.h>
-# include <byteswap.h>
-# include <nettle/gcm.h>
-# include <assert.h>
+#include <gnutls/crypto.h>
+#include "errors.h"
+#include <aes-x86.h>
+#include <x86-common.h>
+#include <byteswap.h>
+#include <nettle/gcm.h>
+#include <assert.h>
/* GCM mode
* It is used when the CPU doesn't include the PCLMUL instructions.
*/
struct gcm_x86_aes_ctx {
- struct GCM_CTX (AES_KEY) inner;
+ struct GCM_CTX(AES_KEY) inner;
size_t rekey_counter;
};
-static void x86_aes_encrypt(const void *_ctx,
- size_t length, uint8_t * dst, const uint8_t * src)
+static void x86_aes_encrypt(const void *_ctx, size_t length, uint8_t *dst,
+ const uint8_t *src)
{
AES_KEY *ctx = (void *)_ctx;
unsigned i;
@@ -62,29 +62,29 @@ static void x86_aes_encrypt(const void *_ctx,
}
}
-static void x86_aes_128_set_encrypt_key(void *_ctx, const uint8_t * key)
+static void x86_aes_128_set_encrypt_key(void *_ctx, const uint8_t *key)
{
AES_KEY *ctx = _ctx;
vpaes_set_encrypt_key(key, 16 * 8, ctx);
}
-static void x86_aes_192_set_encrypt_key(void *_ctx, const uint8_t * key)
+static void x86_aes_192_set_encrypt_key(void *_ctx, const uint8_t *key)
{
AES_KEY *ctx = _ctx;
vpaes_set_encrypt_key(key, 24 * 8, ctx);
}
-static void x86_aes_256_set_encrypt_key(void *_ctx, const uint8_t * key)
+static void x86_aes_256_set_encrypt_key(void *_ctx, const uint8_t *key)
{
AES_KEY *ctx = _ctx;
vpaes_set_encrypt_key(key, 32 * 8, ctx);
}
-static int
-aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_gcm_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
if (algorithm != GNUTLS_CIPHER_AES_128_GCM &&
@@ -134,9 +134,8 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static int
-aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t length)
+static int aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t length)
{
struct gcm_x86_aes_ctx *ctx = _ctx;
int ret;
@@ -154,9 +153,8 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct gcm_x86_aes_ctx *ctx = _ctx;
@@ -191,7 +189,7 @@ static void aes_gcm_deinit(void *_ctx)
gnutls_free(ctx);
}
-# include "aes-gcm-aead.h"
+#include "aes-gcm-aead.h"
const gnutls_crypto_cipher_st _gnutls_aes_gcm_x86_ssse3 = {
.init = aes_gcm_cipher_init,
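Aside: unlike the AES-NI build, the SSSE3 x86_aes_encrypt() above drives the single-block vpaes_encrypt() in a loop to satisfy nettle's length-based callback. The shape of that loop, with demo_block() as a hypothetical stand-in for vpaes_encrypt() with the key bound:

#include <stddef.h>
#include <stdint.h>

static void demo_ecb_loop(void (*demo_block)(const uint8_t *in, uint8_t *out),
			  size_t length, uint8_t *dst, const uint8_t *src)
{
	size_t i;

	/* length arrives as a multiple of the 16-byte block here */
	for (i = 0; i < length; i += 16)
		demo_block(src + i, dst + i);
}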
diff --git a/lib/accelerated/x86/aes-padlock.c b/lib/accelerated/x86/aes-padlock.c
index 6e1e755fa2..518d8bd4a8 100644
--- a/lib/accelerated/x86/aes-padlock.c
+++ b/lib/accelerated/x86/aes-padlock.c
@@ -32,17 +32,17 @@
#include "errors.h"
#include <aes-x86.h>
#include <x86-common.h>
-#include <nettle/aes.h> /* for key generation in 192 and 256 bits */
+#include <nettle/aes.h> /* for key generation in 192 and 256 bits */
#include <sha-padlock.h>
#include <aes-padlock.h>
-static int
-aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+static int aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
+ int enc)
{
/* we use key size to distinguish */
- if (algorithm != GNUTLS_CIPHER_AES_128_CBC
- && algorithm != GNUTLS_CIPHER_AES_256_CBC
- && algorithm != GNUTLS_CIPHER_AES_192_CBC)
+ if (algorithm != GNUTLS_CIPHER_AES_128_CBC &&
+ algorithm != GNUTLS_CIPHER_AES_256_CBC &&
+ algorithm != GNUTLS_CIPHER_AES_192_CBC)
return GNUTLS_E_INVALID_REQUEST;
*_ctx = gnutls_calloc(1, sizeof(struct padlock_ctx));
@@ -125,9 +125,8 @@ static int aes_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static int
-padlock_aes_cbc_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int padlock_aes_cbc_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct padlock_ctx *ctx = _ctx;
struct padlock_cipher_data *pce;
@@ -144,9 +143,8 @@ padlock_aes_cbc_encrypt(void *_ctx, const void *src, size_t src_size,
return ret ? 0 : GNUTLS_E_ENCRYPTION_FAILED;
}
-static int
-padlock_aes_cbc_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int padlock_aes_cbc_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct padlock_ctx *ctx = _ctx;
struct padlock_cipher_data *pcd;
diff --git a/lib/accelerated/x86/aes-padlock.h b/lib/accelerated/x86/aes-padlock.h
index a33391ef19..0b638e2127 100644
--- a/lib/accelerated/x86/aes-padlock.h
+++ b/lib/accelerated/x86/aes-padlock.h
@@ -1,25 +1,25 @@
#ifndef GNUTLS_LIB_ACCELERATED_X86_AES_PADLOCK_H
-# define GNUTLS_LIB_ACCELERATED_X86_AES_PADLOCK_H
+#define GNUTLS_LIB_ACCELERATED_X86_AES_PADLOCK_H
-# include "gnutls_int.h"
-# include <aes-x86.h>
+#include "gnutls_int.h"
+#include <aes-x86.h>
struct padlock_cipher_data {
- unsigned char iv[16]; /* Initialization vector */
+ unsigned char iv[16]; /* Initialization vector */
union {
unsigned int pad[4];
struct {
- unsigned rounds:4;
- unsigned dgst:1; /* n/a in C3 */
- unsigned align:1; /* n/a in C3 */
- unsigned ciphr:1; /* n/a in C3 */
- unsigned int keygen:1;
- unsigned interm:1;
- unsigned int encdec:1;
- unsigned ksize:2;
+ unsigned rounds : 4;
+ unsigned dgst : 1; /* n/a in C3 */
+ unsigned align : 1; /* n/a in C3 */
+ unsigned ciphr : 1; /* n/a in C3 */
+ unsigned int keygen : 1;
+ unsigned interm : 1;
+ unsigned int encdec : 1;
+ unsigned ksize : 2;
} b;
- } cword; /* Control word */
- AES_KEY ks; /* Encryption key */
+ } cword; /* Control word */
+ AES_KEY ks; /* Encryption key */
};
struct padlock_ctx {
@@ -42,4 +42,4 @@ int padlock_ecb_encrypt(void *out, const void *inp,
struct padlock_cipher_data *ctx, size_t len);
int padlock_cbc_encrypt(void *out, const void *inp,
struct padlock_cipher_data *ctx, size_t len);
-#endif /* GNUTLS_LIB_ACCELERATED_X86_AES_PADLOCK_H */
+#endif /* GNUTLS_LIB_ACCELERATED_X86_AES_PADLOCK_H */
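Aside: the cword bitfields reformatted above program the VIA PadLock ACE. The fill below follows the PadLock programming guide's conventions as commonly implemented (an assumption — the encdec and keygen semantics are taken from equivalent engine code elsewhere, not from this patch).

#include "aes-padlock.h" /* struct padlock_cipher_data from above */

/* Hypothetical control-word fill (values assumed, not from the patch):
 * AES-128/192/256 -> rounds 10/12/14, ksize 0/1/2. */
static void demo_fill_cword(struct padlock_cipher_data *pcd, unsigned keybits,
			    int encrypt)
{
	pcd->cword.b.rounds = 10 + (keybits - 128) / 32; /* 10, 12, 14 */
	pcd->cword.b.ksize = (keybits - 128) / 64;	 /* 0, 1, 2 */
	pcd->cword.b.encdec = encrypt ? 0 : 1;	/* assumed: 0 = encrypt */
	pcd->cword.b.keygen = 1; /* assumed: key schedule supplied in ks */
}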
diff --git a/lib/accelerated/x86/aes-x86.h b/lib/accelerated/x86/aes-x86.h
index b9e6dc5b2a..c6bc1cb399 100644
--- a/lib/accelerated/x86/aes-x86.h
+++ b/lib/accelerated/x86/aes-x86.h
@@ -1,15 +1,15 @@
#ifndef GNUTLS_LIB_ACCELERATED_X86_AES_X86_H
-# define GNUTLS_LIB_ACCELERATED_X86_AES_X86_H
+#define GNUTLS_LIB_ACCELERATED_X86_AES_X86_H
-# include "gnutls_int.h"
+#include "gnutls_int.h"
void register_x86_crypto(void);
-# define ALIGN16(x) \
- ((void *)(((ptrdiff_t)(x)+(ptrdiff_t)0x0f)&~((ptrdiff_t)0x0f)))
+#define ALIGN16(x) \
+ ((void *)(((ptrdiff_t)(x) + (ptrdiff_t)0x0f) & ~((ptrdiff_t)0x0f)))
-# define AES_KEY_ALIGN_SIZE 4
-# define AES_MAXNR 14
+#define AES_KEY_ALIGN_SIZE 4
+#define AES_MAXNR 14
typedef struct {
/* We add few more integers to allow alignment
* on a 16-byte boundary.
@@ -18,12 +18,12 @@ typedef struct {
uint32_t rounds;
} AES_KEY;
-# define CHECK_AES_KEYSIZE(s) \
+#define CHECK_AES_KEYSIZE(s) \
if (s != 16 && s != 24 && s != 32) \
- return GNUTLS_E_INVALID_REQUEST
+ return GNUTLS_E_INVALID_REQUEST
-# include <intprops.h>
-# define AES_GCM_ENCRYPT_MAX_BYTES ((1ULL << 36) - 32)
+#include <intprops.h>
+#define AES_GCM_ENCRYPT_MAX_BYTES ((1ULL << 36) - 32)
static inline int record_aes_gcm_encrypt_size(size_t *counter, size_t size)
{
size_t sum;
@@ -37,49 +37,45 @@ static inline int record_aes_gcm_encrypt_size(size_t *counter, size_t size)
return 0;
}
-void aesni_ecb_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const AES_KEY * key, int enc);
+void aesni_ecb_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const AES_KEY *key, int enc);
-void aesni_cbc_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const AES_KEY * key,
- unsigned char *ivec, const int enc);
+void aesni_cbc_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const AES_KEY *key, unsigned char *ivec, const int enc);
int aesni_set_decrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY * key);
+ AES_KEY *key);
int aesni_set_encrypt_key(const unsigned char *userKey, const int bits,
- AES_KEY * key);
+ AES_KEY *key);
-void aesni_ctr32_encrypt_blocks(const unsigned char *in,
- unsigned char *out,
- size_t blocks,
- const void *key, const unsigned char *ivec);
+void aesni_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
+ size_t blocks, const void *key,
+ const unsigned char *ivec);
size_t aesni_gcm_encrypt(const void *inp, void *out, size_t len,
- const AES_KEY * key, const unsigned char iv[16],
- uint64_t * Xi);
+ const AES_KEY *key, const unsigned char iv[16],
+ uint64_t *Xi);
size_t aesni_gcm_decrypt(const void *inp, void *out, size_t len,
- const AES_KEY * key, const unsigned char iv[16],
- uint64_t * Xi);
+ const AES_KEY *key, const unsigned char iv[16],
+ uint64_t *Xi);
-void aesni_xts_encrypt(const unsigned char *in, unsigned char *out,
- size_t len, const AES_KEY * key, const AES_KEY * key2,
+void aesni_xts_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const AES_KEY *key, const AES_KEY *key2,
const unsigned char iv[16]);
-void aesni_xts_decrypt(const unsigned char *in, unsigned char *out,
- size_t len, const AES_KEY * key, const AES_KEY * key2,
+void aesni_xts_decrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const AES_KEY *key, const AES_KEY *key2,
const unsigned char iv[16]);
-int vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
- AES_KEY * key);
-int vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
- AES_KEY * key);
+int vpaes_set_encrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);
+int vpaes_set_decrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);
void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
- size_t length, const AES_KEY * key, unsigned char *ivec,
+ size_t length, const AES_KEY *key, unsigned char *ivec,
int enc);
void vpaes_encrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY * key);
+ const AES_KEY *key);
void vpaes_decrypt(const unsigned char *in, unsigned char *out,
- const AES_KEY * key);
+ const AES_KEY *key);
extern const gnutls_crypto_cipher_st _gnutls_aes_gcm_pclmul;
extern const gnutls_crypto_cipher_st _gnutls_aes_gcm_pclmul_avx;
@@ -92,4 +88,4 @@ extern const gnutls_crypto_cipher_st _gnutls_aes_ssse3;
extern const gnutls_crypto_cipher_st _gnutls_aesni_x86;
-#endif /* GNUTLS_LIB_ACCELERATED_X86_AES_X86_H */
+#endif /* GNUTLS_LIB_ACCELERATED_X86_AES_X86_H */
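Aside: ALIGN16() above rounds a pointer up to the next 16-byte boundary — which is why AES_KEY reserves spare words, so the expanded key can be used with aligned SSE loads — and record_aes_gcm_encrypt_size() enforces the per-key GCM plaintext ceiling of 2^36 - 32 bytes via gnulib's INT_ADD_OVERFLOW. A worked function form of the macro:

#include <stddef.h>

/* Function form of ALIGN16(): add 15, clear the low 4 bits.
 * e.g. 0x1001 -> 0x1010; 0x1010 -> 0x1010 (already aligned). */
static void *demo_align16(void *p)
{
	return (void *)(((ptrdiff_t)p + (ptrdiff_t)0x0f) &
			~((ptrdiff_t)0x0f));
}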
diff --git a/lib/accelerated/x86/aes-xts-x86-aesni.c b/lib/accelerated/x86/aes-xts-x86-aesni.c
index 52761068c3..0f02f1b3ed 100644
--- a/lib/accelerated/x86/aes-xts-x86-aesni.c
+++ b/lib/accelerated/x86/aes-xts-x86-aesni.c
@@ -40,9 +40,8 @@ struct x86_aes_xts_ctx {
int enc;
};
-static int
-x86_aes_xts_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
- int enc)
+static int x86_aes_xts_cipher_init(gnutls_cipher_algorithm_t algorithm,
+ void **_ctx, int enc)
{
if (algorithm != GNUTLS_CIPHER_AES_128_XTS &&
algorithm != GNUTLS_CIPHER_AES_256_XTS)
@@ -59,8 +58,8 @@ x86_aes_xts_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx,
return 0;
}
-static int
-x86_aes_xts_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+static int x86_aes_xts_cipher_setkey(void *_ctx, const void *userkey,
+ size_t keysize)
{
struct x86_aes_xts_ctx *ctx = _ctx;
int ret;
@@ -81,20 +80,17 @@ x86_aes_xts_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
keybits = keysize * 4;
if (ctx->enc)
- ret =
- aesni_set_encrypt_key(key, keybits,
- ALIGN16(&ctx->block_key));
+ ret = aesni_set_encrypt_key(key, keybits,
+ ALIGN16(&ctx->block_key));
else
- ret =
- aesni_set_decrypt_key(key, keybits,
- ALIGN16(&ctx->block_key));
+ ret = aesni_set_decrypt_key(key, keybits,
+ ALIGN16(&ctx->block_key));
if (ret != 0)
return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
- ret =
- aesni_set_encrypt_key(key + (keysize / 2), keybits,
- ALIGN16(&ctx->tweak_key));
+ ret = aesni_set_encrypt_key(key + (keysize / 2), keybits,
+ ALIGN16(&ctx->tweak_key));
if (ret != 0)
return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
@@ -112,9 +108,8 @@ static int x86_aes_xts_setiv(void *_ctx, const void *iv, size_t iv_size)
return 0;
}
-static int
-x86_aes_xts_encrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int x86_aes_xts_encrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct x86_aes_xts_ctx *ctx = _ctx;
@@ -129,9 +124,8 @@ x86_aes_xts_encrypt(void *_ctx, const void *src, size_t src_size,
return 0;
}
-static int
-x86_aes_xts_decrypt(void *_ctx, const void *src, size_t src_size,
- void *dst, size_t dst_size)
+static int x86_aes_xts_decrypt(void *_ctx, const void *src, size_t src_size,
+ void *dst, size_t dst_size)
{
struct x86_aes_xts_ctx *ctx = _ctx;
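Aside: x86_aes_xts_cipher_setkey() above splits the user key into the data-unit key and the tweak key, which is why keybits comes out as keysize * 4: each half is keysize / 2 bytes, i.e. (keysize / 2) * 8 bits. A sketch of the split, using hypothetical output parameters:

#include <stddef.h>

static void demo_xts_split(const unsigned char *key, size_t keysize,
			   const unsigned char **block_key,
			   const unsigned char **tweak_key, int *keybits)
{
	*block_key = key;		/* first half: data-unit key */
	*tweak_key = key + keysize / 2; /* second half: tweak key */
	*keybits = (int)(keysize * 4);	/* bits in each half */
}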
diff --git a/lib/accelerated/x86/hmac-padlock.c b/lib/accelerated/x86/hmac-padlock.c
index 8913166f34..5dd3f450dc 100644
--- a/lib/accelerated/x86/hmac-padlock.c
+++ b/lib/accelerated/x86/hmac-padlock.c
@@ -37,9 +37,9 @@
#ifdef HAVE_LIBNETTLE
-# define IPAD 0x36
-# define OPAD 0x5c
-# define MAX_SHA_DIGEST_SIZE (512/8)
+#define IPAD 0x36
+#define OPAD 0x5c
+#define MAX_SHA_DIGEST_SIZE (512 / 8)
typedef void (*update_func)(void *, size_t, const uint8_t *);
typedef void (*digest_func)(void *, size_t, uint8_t *);
@@ -62,133 +62,120 @@ struct padlock_hmac_ctx {
set_key_func setkey;
};
-static void
-padlock_hmac_sha1_set_key(struct hmac_sha1_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void padlock_hmac_sha1_set_key(struct hmac_sha1_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &padlock_sha1, key_length, key);
}
-static void
-padlock_hmac_sha1_update(struct hmac_sha1_ctx *ctx,
- size_t length, const uint8_t * data)
+static void padlock_hmac_sha1_update(struct hmac_sha1_ctx *ctx, size_t length,
+ const uint8_t *data)
{
padlock_sha1_update(&ctx->state, length, data);
}
-static void
-padlock_hmac_sha1_digest(struct hmac_sha1_ctx *ctx,
- size_t length, uint8_t * digest)
+static void padlock_hmac_sha1_digest(struct hmac_sha1_ctx *ctx, size_t length,
+ uint8_t *digest)
{
HMAC_DIGEST(ctx, &padlock_sha1, length, digest);
}
-static void
-padlock_hmac_sha256_set_key(struct hmac_sha256_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void padlock_hmac_sha256_set_key(struct hmac_sha256_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &padlock_sha256, key_length, key);
}
-static void
-padlock_hmac_sha256_update(struct hmac_sha256_ctx *ctx,
- size_t length, const uint8_t * data)
+static void padlock_hmac_sha256_update(struct hmac_sha256_ctx *ctx,
+ size_t length, const uint8_t *data)
{
padlock_sha256_update(&ctx->state, length, data);
}
-static void
-padlock_hmac_sha256_digest(struct hmac_sha256_ctx *ctx,
- size_t length, uint8_t * digest)
+static void padlock_hmac_sha256_digest(struct hmac_sha256_ctx *ctx,
+ size_t length, uint8_t *digest)
{
HMAC_DIGEST(ctx, &padlock_sha256, length, digest);
}
-static void
-padlock_hmac_sha224_set_key(struct hmac_sha224_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void padlock_hmac_sha224_set_key(struct hmac_sha224_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &padlock_sha224, key_length, key);
}
-static void
-padlock_hmac_sha224_digest(struct hmac_sha224_ctx *ctx,
- size_t length, uint8_t * digest)
+static void padlock_hmac_sha224_digest(struct hmac_sha224_ctx *ctx,
+ size_t length, uint8_t *digest)
{
HMAC_DIGEST(ctx, &padlock_sha224, length, digest);
}
-static void
-padlock_hmac_sha384_set_key(struct hmac_sha384_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void padlock_hmac_sha384_set_key(struct hmac_sha384_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &padlock_sha384, key_length, key);
}
-static void
-padlock_hmac_sha384_digest(struct hmac_sha384_ctx *ctx,
- size_t length, uint8_t * digest)
+static void padlock_hmac_sha384_digest(struct hmac_sha384_ctx *ctx,
+ size_t length, uint8_t *digest)
{
HMAC_DIGEST(ctx, &padlock_sha384, length, digest);
}
-static void
-padlock_hmac_sha512_set_key(struct hmac_sha512_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void padlock_hmac_sha512_set_key(struct hmac_sha512_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &padlock_sha512, key_length, key);
}
-static void
-padlock_hmac_sha512_update(struct hmac_sha512_ctx *ctx,
- size_t length, const uint8_t * data)
+static void padlock_hmac_sha512_update(struct hmac_sha512_ctx *ctx,
+ size_t length, const uint8_t *data)
{
padlock_sha512_update(&ctx->state, length, data);
}
-static void
-padlock_hmac_sha512_digest(struct hmac_sha512_ctx *ctx,
- size_t length, uint8_t * digest)
+static void padlock_hmac_sha512_digest(struct hmac_sha512_ctx *ctx,
+ size_t length, uint8_t *digest)
{
HMAC_DIGEST(ctx, &padlock_sha512, length, digest);
}
-static int
-_hmac_ctx_init(gnutls_mac_algorithm_t algo, struct padlock_hmac_ctx *ctx)
+static int _hmac_ctx_init(gnutls_mac_algorithm_t algo,
+ struct padlock_hmac_ctx *ctx)
{
switch (algo) {
case GNUTLS_MAC_SHA1:
- ctx->update = (update_func) padlock_hmac_sha1_update;
- ctx->digest = (digest_func) padlock_hmac_sha1_digest;
- ctx->setkey = (set_key_func) padlock_hmac_sha1_set_key;
+ ctx->update = (update_func)padlock_hmac_sha1_update;
+ ctx->digest = (digest_func)padlock_hmac_sha1_digest;
+ ctx->setkey = (set_key_func)padlock_hmac_sha1_set_key;
ctx->ctx_ptr = &ctx->ctx.sha1;
ctx->length = SHA1_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA224:
- ctx->update = (update_func) padlock_hmac_sha256_update;
- ctx->digest = (digest_func) padlock_hmac_sha224_digest;
- ctx->setkey = (set_key_func) padlock_hmac_sha224_set_key;
+ ctx->update = (update_func)padlock_hmac_sha256_update;
+ ctx->digest = (digest_func)padlock_hmac_sha224_digest;
+ ctx->setkey = (set_key_func)padlock_hmac_sha224_set_key;
ctx->ctx_ptr = &ctx->ctx.sha224;
ctx->length = SHA224_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA256:
- ctx->update = (update_func) padlock_hmac_sha256_update;
- ctx->digest = (digest_func) padlock_hmac_sha256_digest;
- ctx->setkey = (set_key_func) padlock_hmac_sha256_set_key;
+ ctx->update = (update_func)padlock_hmac_sha256_update;
+ ctx->digest = (digest_func)padlock_hmac_sha256_digest;
+ ctx->setkey = (set_key_func)padlock_hmac_sha256_set_key;
ctx->ctx_ptr = &ctx->ctx.sha256;
ctx->length = SHA256_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA384:
- ctx->update = (update_func) padlock_hmac_sha512_update;
- ctx->digest = (digest_func) padlock_hmac_sha384_digest;
- ctx->setkey = (set_key_func) padlock_hmac_sha384_set_key;
+ ctx->update = (update_func)padlock_hmac_sha512_update;
+ ctx->digest = (digest_func)padlock_hmac_sha384_digest;
+ ctx->setkey = (set_key_func)padlock_hmac_sha384_set_key;
ctx->ctx_ptr = &ctx->ctx.sha384;
ctx->length = SHA384_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA512:
- ctx->update = (update_func) padlock_hmac_sha512_update;
- ctx->digest = (digest_func) padlock_hmac_sha512_digest;
- ctx->setkey = (set_key_func) padlock_hmac_sha512_set_key;
+ ctx->update = (update_func)padlock_hmac_sha512_update;
+ ctx->digest = (digest_func)padlock_hmac_sha512_digest;
+ ctx->setkey = (set_key_func)padlock_hmac_sha512_set_key;
ctx->ctx_ptr = &ctx->ctx.sha512;
ctx->length = SHA512_DIGEST_SIZE;
break;
@@ -226,7 +213,7 @@ static void *wrap_padlock_hmac_copy(const void *_ctx)
{
struct padlock_hmac_ctx *new_ctx;
const struct padlock_hmac_ctx *ctx = _ctx;
- ptrdiff_t off = (uint8_t *) ctx->ctx_ptr - (uint8_t *) (&ctx->ctx);
+ ptrdiff_t off = (uint8_t *)ctx->ctx_ptr - (uint8_t *)(&ctx->ctx);
new_ctx = gnutls_malloc(sizeof(struct padlock_hmac_ctx));
if (new_ctx == NULL) {
@@ -235,7 +222,7 @@ static void *wrap_padlock_hmac_copy(const void *_ctx)
}
memcpy(new_ctx, ctx, sizeof(*new_ctx));
- new_ctx->ctx_ptr = (uint8_t *) & new_ctx->ctx + off;
+ new_ctx->ctx_ptr = (uint8_t *)&new_ctx->ctx + off;
return new_ctx;
}
@@ -249,8 +236,8 @@ static int wrap_padlock_hmac_setkey(void *_ctx, const void *key, size_t keylen)
return GNUTLS_E_SUCCESS;
}
-static int
-wrap_padlock_hmac_update(void *_ctx, const void *text, size_t textsize)
+static int wrap_padlock_hmac_update(void *_ctx, const void *text,
+ size_t textsize)
{
struct padlock_hmac_ctx *ctx = _ctx;
@@ -259,8 +246,8 @@ wrap_padlock_hmac_update(void *_ctx, const void *text, size_t textsize)
return GNUTLS_E_SUCCESS;
}
-static int
-wrap_padlock_hmac_output(void *src_ctx, void *digest, size_t digestsize)
+static int wrap_padlock_hmac_output(void *src_ctx, void *digest,
+ size_t digestsize)
{
struct padlock_hmac_ctx *ctx;
ctx = src_ctx;
@@ -283,22 +270,22 @@ static void wrap_padlock_hmac_deinit(void *hd)
gnutls_free(ctx);
}
-static int
-wrap_padlock_hmac_fast(gnutls_mac_algorithm_t algo,
- const void *nonce, size_t nonce_size,
- const void *key, size_t key_size, const void *text,
- size_t text_size, void *digest)
+static int wrap_padlock_hmac_fast(gnutls_mac_algorithm_t algo,
+ const void *nonce, size_t nonce_size,
+ const void *key, size_t key_size,
+ const void *text, size_t text_size,
+ void *digest)
{
if (algo == GNUTLS_MAC_SHA1 || algo == GNUTLS_MAC_SHA256) {
unsigned char *pad;
unsigned char pad2[SHA1_DATA_SIZE + MAX_SHA_DIGEST_SIZE];
unsigned char hkey[MAX_SHA_DIGEST_SIZE];
unsigned int digest_size =
- _gnutls_mac_get_algo_len(mac_to_entry(algo));
+ _gnutls_mac_get_algo_len(mac_to_entry(algo));
if (key_size > SHA1_DATA_SIZE) {
- wrap_padlock_hash_fast((gnutls_digest_algorithm_t)
- algo, key, key_size, hkey);
+ wrap_padlock_hash_fast((gnutls_digest_algorithm_t)algo,
+ key, key_size, hkey);
key = hkey;
key_size = digest_size;
}
@@ -312,8 +299,8 @@ wrap_padlock_hmac_fast(gnutls_mac_algorithm_t algo,
memcpy(&pad[SHA1_DATA_SIZE], text, text_size);
- wrap_padlock_hash_fast((gnutls_digest_algorithm_t) algo,
- pad, text_size + SHA1_DATA_SIZE,
+ wrap_padlock_hash_fast((gnutls_digest_algorithm_t)algo, pad,
+ text_size + SHA1_DATA_SIZE,
&pad2[SHA1_DATA_SIZE]);
zeroize_temp_key(pad, text_size + SHA1_DATA_SIZE);
@@ -322,9 +309,8 @@ wrap_padlock_hmac_fast(gnutls_mac_algorithm_t algo,
memset(pad2, OPAD, SHA1_DATA_SIZE);
memxor(pad2, key, key_size);
- wrap_padlock_hash_fast((gnutls_digest_algorithm_t) algo,
- pad2, digest_size + SHA1_DATA_SIZE,
- digest);
+ wrap_padlock_hash_fast((gnutls_digest_algorithm_t)algo, pad2,
+ digest_size + SHA1_DATA_SIZE, digest);
zeroize_temp_key(pad2, sizeof(pad2));
zeroize_temp_key(hkey, sizeof(hkey));
@@ -370,4 +356,4 @@ const gnutls_crypto_mac_st _gnutls_hmac_sha_padlock = {
.fast = wrap_padlock_hmac_fast,
};
-#endif /* HAVE_LIBNETTLE */
+#endif /* HAVE_LIBNETTLE */
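Aside: wrap_padlock_hmac_fast() above is a flattened HMAC, H((K ^ opad) || H((K ^ ipad) || text)), over the 64-byte SHA-1/SHA-256 block. A self-contained sketch under the stated size assumptions, with demo_hash() standing in for wrap_padlock_hash_fast():

#include <stdint.h>
#include <string.h>

/* Assumes key_size <= 64, digest_size <= 64, text_size <= 1024. */
static void demo_hmac(void (*demo_hash)(const uint8_t *in, size_t len,
					uint8_t *out),
		      const uint8_t *key, size_t key_size,
		      const uint8_t *text, size_t text_size,
		      size_t digest_size, uint8_t *digest)
{
	uint8_t inner[64 + 1024];
	uint8_t outer[64 + 64];
	size_t i;

	memset(inner, 0x36, 64); /* IPAD */
	memset(outer, 0x5c, 64); /* OPAD */
	for (i = 0; i < key_size; i++) {
		inner[i] ^= key[i];
		outer[i] ^= key[i];
	}
	memcpy(inner + 64, text, text_size);
	demo_hash(inner, 64 + text_size, outer + 64); /* inner digest */
	demo_hash(outer, 64 + digest_size, digest);   /* outer digest */
}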
diff --git a/lib/accelerated/x86/hmac-x86-ssse3.c b/lib/accelerated/x86/hmac-x86-ssse3.c
index d743e28bf7..e037c4658b 100644
--- a/lib/accelerated/x86/hmac-x86-ssse3.c
+++ b/lib/accelerated/x86/hmac-x86-ssse3.c
@@ -57,92 +57,80 @@ struct x86_hmac_ctx {
set_key_func setkey;
};
-static void
-x86_hmac_sha1_set_key(struct hmac_sha1_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void x86_hmac_sha1_set_key(struct hmac_sha1_ctx *ctx, size_t key_length,
+ const uint8_t *key)
{
HMAC_SET_KEY(ctx, &x86_sha1, key_length, key);
}
-static void
-x86_hmac_sha1_update(struct hmac_sha1_ctx *ctx,
- size_t length, const uint8_t * data)
+static void x86_hmac_sha1_update(struct hmac_sha1_ctx *ctx, size_t length,
+ const uint8_t *data)
{
x86_sha1_update(&ctx->state, length, data);
}
-static void
-x86_hmac_sha1_digest(struct hmac_sha1_ctx *ctx, size_t length, uint8_t * digest)
+static void x86_hmac_sha1_digest(struct hmac_sha1_ctx *ctx, size_t length,
+ uint8_t *digest)
{
HMAC_DIGEST(ctx, &x86_sha1, length, digest);
}
-static void
-x86_hmac_sha256_set_key(struct hmac_sha256_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void x86_hmac_sha256_set_key(struct hmac_sha256_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &x86_sha256, key_length, key);
}
-static void
-x86_hmac_sha256_update(struct hmac_sha256_ctx *ctx,
- size_t length, const uint8_t * data)
+static void x86_hmac_sha256_update(struct hmac_sha256_ctx *ctx, size_t length,
+ const uint8_t *data)
{
x86_sha256_update(&ctx->state, length, data);
}
-static void
-x86_hmac_sha256_digest(struct hmac_sha256_ctx *ctx,
- size_t length, uint8_t * digest)
+static void x86_hmac_sha256_digest(struct hmac_sha256_ctx *ctx, size_t length,
+ uint8_t *digest)
{
HMAC_DIGEST(ctx, &x86_sha256, length, digest);
}
-static void
-x86_hmac_sha224_set_key(struct hmac_sha224_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void x86_hmac_sha224_set_key(struct hmac_sha224_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &x86_sha224, key_length, key);
}
-static void
-x86_hmac_sha224_digest(struct hmac_sha224_ctx *ctx,
- size_t length, uint8_t * digest)
+static void x86_hmac_sha224_digest(struct hmac_sha224_ctx *ctx, size_t length,
+ uint8_t *digest)
{
HMAC_DIGEST(ctx, &x86_sha224, length, digest);
}
-static void
-x86_hmac_sha384_set_key(struct hmac_sha384_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void x86_hmac_sha384_set_key(struct hmac_sha384_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &x86_sha384, key_length, key);
}
-static void
-x86_hmac_sha384_digest(struct hmac_sha384_ctx *ctx,
- size_t length, uint8_t * digest)
+static void x86_hmac_sha384_digest(struct hmac_sha384_ctx *ctx, size_t length,
+ uint8_t *digest)
{
HMAC_DIGEST(ctx, &x86_sha384, length, digest);
}
-static void
-x86_hmac_sha512_set_key(struct hmac_sha512_ctx *ctx,
- size_t key_length, const uint8_t * key)
+static void x86_hmac_sha512_set_key(struct hmac_sha512_ctx *ctx,
+ size_t key_length, const uint8_t *key)
{
HMAC_SET_KEY(ctx, &x86_sha512, key_length, key);
}
-static void
-x86_hmac_sha512_update(struct hmac_sha512_ctx *ctx,
- size_t length, const uint8_t * data)
+static void x86_hmac_sha512_update(struct hmac_sha512_ctx *ctx, size_t length,
+ const uint8_t *data)
{
x86_sha512_update(&ctx->state, length, data);
}
-static void
-x86_hmac_sha512_digest(struct hmac_sha512_ctx *ctx,
- size_t length, uint8_t * digest)
+static void x86_hmac_sha512_digest(struct hmac_sha512_ctx *ctx, size_t length,
+ uint8_t *digest)
{
HMAC_DIGEST(ctx, &x86_sha512, length, digest);
}
@@ -151,37 +139,37 @@ static int _hmac_ctx_init(gnutls_mac_algorithm_t algo, struct x86_hmac_ctx *ctx)
{
switch (algo) {
case GNUTLS_MAC_SHA1:
- ctx->update = (update_func) x86_hmac_sha1_update;
- ctx->digest = (digest_func) x86_hmac_sha1_digest;
- ctx->setkey = (set_key_func) x86_hmac_sha1_set_key;
+ ctx->update = (update_func)x86_hmac_sha1_update;
+ ctx->digest = (digest_func)x86_hmac_sha1_digest;
+ ctx->setkey = (set_key_func)x86_hmac_sha1_set_key;
ctx->ctx_ptr = &ctx->ctx.sha1;
ctx->length = SHA1_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA224:
- ctx->update = (update_func) x86_hmac_sha256_update;
- ctx->digest = (digest_func) x86_hmac_sha224_digest;
- ctx->setkey = (set_key_func) x86_hmac_sha224_set_key;
+ ctx->update = (update_func)x86_hmac_sha256_update;
+ ctx->digest = (digest_func)x86_hmac_sha224_digest;
+ ctx->setkey = (set_key_func)x86_hmac_sha224_set_key;
ctx->ctx_ptr = &ctx->ctx.sha224;
ctx->length = SHA224_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA256:
- ctx->update = (update_func) x86_hmac_sha256_update;
- ctx->digest = (digest_func) x86_hmac_sha256_digest;
- ctx->setkey = (set_key_func) x86_hmac_sha256_set_key;
+ ctx->update = (update_func)x86_hmac_sha256_update;
+ ctx->digest = (digest_func)x86_hmac_sha256_digest;
+ ctx->setkey = (set_key_func)x86_hmac_sha256_set_key;
ctx->ctx_ptr = &ctx->ctx.sha256;
ctx->length = SHA256_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA384:
- ctx->update = (update_func) x86_hmac_sha512_update;
- ctx->digest = (digest_func) x86_hmac_sha384_digest;
- ctx->setkey = (set_key_func) x86_hmac_sha384_set_key;
+ ctx->update = (update_func)x86_hmac_sha512_update;
+ ctx->digest = (digest_func)x86_hmac_sha384_digest;
+ ctx->setkey = (set_key_func)x86_hmac_sha384_set_key;
ctx->ctx_ptr = &ctx->ctx.sha384;
ctx->length = SHA384_DIGEST_SIZE;
break;
case GNUTLS_MAC_SHA512:
- ctx->update = (update_func) x86_hmac_sha512_update;
- ctx->digest = (digest_func) x86_hmac_sha512_digest;
- ctx->setkey = (set_key_func) x86_hmac_sha512_set_key;
+ ctx->update = (update_func)x86_hmac_sha512_update;
+ ctx->digest = (digest_func)x86_hmac_sha512_digest;
+ ctx->setkey = (set_key_func)x86_hmac_sha512_set_key;
ctx->ctx_ptr = &ctx->ctx.sha512;
ctx->length = SHA512_DIGEST_SIZE;
break;
@@ -219,7 +207,7 @@ static void *wrap_x86_hmac_copy(const void *_ctx)
{
struct x86_hmac_ctx *new_ctx;
const struct x86_hmac_ctx *ctx = _ctx;
- ptrdiff_t off = (uint8_t *) ctx->ctx_ptr - (uint8_t *) (&ctx->ctx);
+ ptrdiff_t off = (uint8_t *)ctx->ctx_ptr - (uint8_t *)(&ctx->ctx);
new_ctx = gnutls_malloc(sizeof(struct x86_hmac_ctx));
if (new_ctx == NULL) {
@@ -228,7 +216,7 @@ static void *wrap_x86_hmac_copy(const void *_ctx)
}
memcpy(new_ctx, ctx, sizeof(*new_ctx));
- new_ctx->ctx_ptr = (uint8_t *) & new_ctx->ctx + off;
+ new_ctx->ctx_ptr = (uint8_t *)&new_ctx->ctx + off;
return new_ctx;
}
@@ -274,10 +262,10 @@ static void wrap_x86_hmac_deinit(void *hd)
gnutls_free(ctx);
}
-static int wrap_x86_hmac_fast(gnutls_mac_algorithm_t algo,
- const void *nonce, size_t nonce_size,
- const void *key, size_t key_size,
- const void *text, size_t text_size, void *digest)
+static int wrap_x86_hmac_fast(gnutls_mac_algorithm_t algo, const void *nonce,
+ size_t nonce_size, const void *key,
+ size_t key_size, const void *text,
+ size_t text_size, void *digest)
{
struct x86_hmac_ctx ctx;
int ret;
@@ -306,4 +294,4 @@ const gnutls_crypto_mac_st _gnutls_hmac_sha_x86_ssse3 = {
.fast = wrap_x86_hmac_fast,
};
-#endif /* HAVE_LIBNETTLE */
+#endif /* HAVE_LIBNETTLE */
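
Editor's note: wrap_x86_hmac_copy above relies on a relocation idiom worth calling out: ctx_ptr aims into the context's own union, so the copy records its byte offset, memcpy()s the whole structure, and rebases the pointer onto the new allocation. A self-contained sketch with hypothetical names and plain malloc:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct outer_ctx {
		union { int small; long big; } u;
		void *inner; /* points somewhere inside u */
	};

	static struct outer_ctx *copy_ctx(const struct outer_ctx *src)
	{
		/* Distance of the interior pointer from the union start... */
		ptrdiff_t off = (uint8_t *)src->inner - (uint8_t *)&src->u;
		struct outer_ctx *dst = malloc(sizeof(*dst));

		if (dst == NULL)
			return NULL;
		memcpy(dst, src, sizeof(*dst));
		/* ...re-applied so dst->inner no longer aliases src. */
		dst->inner = (uint8_t *)&dst->u + off;
		return dst;
	}
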
diff --git a/lib/accelerated/x86/sha-padlock.c b/lib/accelerated/x86/sha-padlock.c
index 14f1d85976..1671c2f32a 100644
--- a/lib/accelerated/x86/sha-padlock.c
+++ b/lib/accelerated/x86/sha-padlock.c
@@ -55,8 +55,8 @@ struct padlock_hash_ctx {
init_func init;
};
-static int
-wrap_padlock_hash_update(void *_ctx, const void *text, size_t textsize)
+static int wrap_padlock_hash_update(void *_ctx, const void *text,
+ size_t textsize)
{
struct padlock_hash_ctx *ctx = _ctx;
@@ -70,32 +70,33 @@ static void wrap_padlock_hash_deinit(void *hd)
gnutls_free(hd);
}
-# define MD1_INCR(c) (c->count++)
-# define SHA1_COMPRESS(ctx, data) (padlock_sha1_blocks((void*)(ctx)->state, data, 1))
-# define SHA256_COMPRESS(ctx, data) (padlock_sha256_blocks((void*)(ctx)->state, data, 1))
-# define SHA512_COMPRESS(ctx, data) (padlock_sha512_blocks((void*)(ctx)->state, data, 1))
+#define MD1_INCR(c) (c->count++)
+#define SHA1_COMPRESS(ctx, data) \
+ (padlock_sha1_blocks((void *)(ctx)->state, data, 1))
+#define SHA256_COMPRESS(ctx, data) \
+ (padlock_sha256_blocks((void *)(ctx)->state, data, 1))
+#define SHA512_COMPRESS(ctx, data) \
+ (padlock_sha512_blocks((void *)(ctx)->state, data, 1))
-void
-padlock_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t * data)
+void padlock_sha1_update(struct sha1_ctx *ctx, size_t length,
+ const uint8_t *data)
{
MD_UPDATE(ctx, length, data, SHA1_COMPRESS, MD1_INCR(ctx));
}
-void
-padlock_sha256_update(struct sha256_ctx *ctx,
- size_t length, const uint8_t * data)
+void padlock_sha256_update(struct sha256_ctx *ctx, size_t length,
+ const uint8_t *data)
{
MD_UPDATE(ctx, length, data, SHA256_COMPRESS, MD1_INCR(ctx));
}
-void
-padlock_sha512_update(struct sha512_ctx *ctx,
- size_t length, const uint8_t * data)
+void padlock_sha512_update(struct sha512_ctx *ctx, size_t length,
+ const uint8_t *data)
{
MD_UPDATE(ctx, length, data, SHA512_COMPRESS, MD_INCR(ctx));
}
-static void _nettle_write_be32(unsigned length, uint8_t * dst, uint32_t * src)
+static void _nettle_write_be32(unsigned length, uint8_t *dst, uint32_t *src)
{
unsigned i;
unsigned words;
@@ -128,8 +129,8 @@ static void _nettle_write_be32(unsigned length, uint8_t * dst, uint32_t * src)
}
}
-static void
-padlock_sha1_digest(struct sha1_ctx *ctx, size_t length, uint8_t * digest)
+static void padlock_sha1_digest(struct sha1_ctx *ctx, size_t length,
+ uint8_t *digest)
{
uint64_t bit_count;
@@ -147,8 +148,8 @@ padlock_sha1_digest(struct sha1_ctx *ctx, size_t length, uint8_t * digest)
_nettle_write_be32(length, digest, ctx->state);
}
-static void
-padlock_sha256_digest(struct sha256_ctx *ctx, size_t length, uint8_t * digest)
+static void padlock_sha256_digest(struct sha256_ctx *ctx, size_t length,
+ uint8_t *digest)
{
uint64_t bit_count;
@@ -168,8 +169,8 @@ padlock_sha256_digest(struct sha256_ctx *ctx, size_t length, uint8_t * digest)
_nettle_write_be32(length, digest, ctx->state);
}
-static void
-padlock_sha512_digest(struct sha512_ctx *ctx, size_t length, uint8_t * digest)
+static void padlock_sha512_digest(struct sha512_ctx *ctx, size_t length,
+ uint8_t *digest)
{
uint64_t high, low;
@@ -215,41 +216,41 @@ static int _ctx_init(gnutls_digest_algorithm_t algo,
switch (algo) {
case GNUTLS_DIG_SHA1:
sha1_init(&ctx->ctx.sha1);
- ctx->update = (update_func) padlock_sha1_update;
- ctx->digest = (digest_func) padlock_sha1_digest;
- ctx->init = (init_func) sha1_init;
+ ctx->update = (update_func)padlock_sha1_update;
+ ctx->digest = (digest_func)padlock_sha1_digest;
+ ctx->init = (init_func)sha1_init;
ctx->ctx_ptr = &ctx->ctx.sha1;
ctx->length = SHA1_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA224:
sha224_init(&ctx->ctx.sha224);
- ctx->update = (update_func) padlock_sha256_update;
- ctx->digest = (digest_func) padlock_sha256_digest;
- ctx->init = (init_func) sha224_init;
+ ctx->update = (update_func)padlock_sha256_update;
+ ctx->digest = (digest_func)padlock_sha256_digest;
+ ctx->init = (init_func)sha224_init;
ctx->ctx_ptr = &ctx->ctx.sha224;
ctx->length = SHA224_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA256:
sha256_init(&ctx->ctx.sha256);
- ctx->update = (update_func) padlock_sha256_update;
- ctx->digest = (digest_func) padlock_sha256_digest;
- ctx->init = (init_func) sha256_init;
+ ctx->update = (update_func)padlock_sha256_update;
+ ctx->digest = (digest_func)padlock_sha256_digest;
+ ctx->init = (init_func)sha256_init;
ctx->ctx_ptr = &ctx->ctx.sha256;
ctx->length = SHA256_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA384:
sha384_init(&ctx->ctx.sha384);
- ctx->update = (update_func) padlock_sha512_update;
- ctx->digest = (digest_func) padlock_sha512_digest;
- ctx->init = (init_func) sha384_init;
+ ctx->update = (update_func)padlock_sha512_update;
+ ctx->digest = (digest_func)padlock_sha512_digest;
+ ctx->init = (init_func)sha384_init;
ctx->ctx_ptr = &ctx->ctx.sha384;
ctx->length = SHA384_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA512:
sha512_init(&ctx->ctx.sha512);
- ctx->update = (update_func) padlock_sha512_update;
- ctx->digest = (digest_func) padlock_sha512_digest;
- ctx->init = (init_func) sha512_init;
+ ctx->update = (update_func)padlock_sha512_update;
+ ctx->digest = (digest_func)padlock_sha512_digest;
+ ctx->init = (init_func)sha512_init;
ctx->ctx_ptr = &ctx->ctx.sha512;
ctx->length = SHA512_DIGEST_SIZE;
break;
@@ -288,7 +289,7 @@ static void *wrap_padlock_hash_copy(const void *_ctx)
{
struct padlock_hash_ctx *new_ctx;
const struct padlock_hash_ctx *ctx = _ctx;
- ptrdiff_t off = (uint8_t *) ctx->ctx_ptr - (uint8_t *) (&ctx->ctx);
+ ptrdiff_t off = (uint8_t *)ctx->ctx_ptr - (uint8_t *)(&ctx->ctx);
new_ctx = gnutls_malloc(sizeof(struct padlock_hash_ctx));
if (new_ctx == NULL) {
@@ -297,13 +298,13 @@ static void *wrap_padlock_hash_copy(const void *_ctx)
}
memcpy(new_ctx, ctx, sizeof(*new_ctx));
- new_ctx->ctx_ptr = (uint8_t *) & new_ctx->ctx + off;
+ new_ctx->ctx_ptr = (uint8_t *)&new_ctx->ctx + off;
return new_ctx;
}
-static int
-wrap_padlock_hash_output(void *src_ctx, void *digest, size_t digestsize)
+static int wrap_padlock_hash_output(void *src_ctx, void *digest,
+ size_t digestsize)
{
struct padlock_hash_ctx *ctx;
ctx = src_ctx;
@@ -318,27 +319,22 @@ wrap_padlock_hash_output(void *src_ctx, void *digest, size_t digestsize)
return 0;
}
-int wrap_padlock_hash_fast(gnutls_digest_algorithm_t algo,
- const void *text, size_t text_size, void *digest)
+int wrap_padlock_hash_fast(gnutls_digest_algorithm_t algo, const void *text,
+ size_t text_size, void *digest)
{
if (text_size == 0 && text == NULL)
text = digest;
if (algo == GNUTLS_DIG_SHA1) {
uint32_t iv[5] = {
- 0x67452301UL,
- 0xEFCDAB89UL,
- 0x98BADCFEUL,
- 0x10325476UL,
- 0xC3D2E1F0UL,
+ 0x67452301UL, 0xEFCDAB89UL, 0x98BADCFEUL,
+ 0x10325476UL, 0xC3D2E1F0UL,
};
padlock_sha1_oneshot(iv, text, text_size);
_nettle_write_be32(20, digest, iv);
} else if (algo == GNUTLS_DIG_SHA256) {
uint32_t iv[8] = {
- 0x6a09e667UL, 0xbb67ae85UL, 0x3c6ef372UL,
- 0xa54ff53aUL,
- 0x510e527fUL, 0x9b05688cUL, 0x1f83d9abUL,
- 0x5be0cd19UL,
+ 0x6a09e667UL, 0xbb67ae85UL, 0x3c6ef372UL, 0xa54ff53aUL,
+ 0x510e527fUL, 0x9b05688cUL, 0x1f83d9abUL, 0x5be0cd19UL,
};
padlock_sha256_oneshot(iv, text, text_size);
_nettle_write_be32(32, digest, iv);
@@ -360,15 +356,15 @@ int wrap_padlock_hash_fast(gnutls_digest_algorithm_t algo,
}
const struct nettle_hash padlock_sha1 =
-NN_HASH(sha1, padlock_sha1_update, padlock_sha1_digest, SHA1);
+ NN_HASH(sha1, padlock_sha1_update, padlock_sha1_digest, SHA1);
const struct nettle_hash padlock_sha224 =
-NN_HASH(sha224, padlock_sha256_update, padlock_sha256_digest, SHA224);
+ NN_HASH(sha224, padlock_sha256_update, padlock_sha256_digest, SHA224);
const struct nettle_hash padlock_sha256 =
-NN_HASH(sha256, padlock_sha256_update, padlock_sha256_digest, SHA256);
+ NN_HASH(sha256, padlock_sha256_update, padlock_sha256_digest, SHA256);
const struct nettle_hash padlock_sha384 =
-NN_HASH(sha384, padlock_sha512_update, padlock_sha512_digest, SHA384);
+ NN_HASH(sha384, padlock_sha512_update, padlock_sha512_digest, SHA384);
const struct nettle_hash padlock_sha512 =
-NN_HASH(sha512, padlock_sha512_update, padlock_sha512_digest, SHA512);
+ NN_HASH(sha512, padlock_sha512_update, padlock_sha512_digest, SHA512);
const gnutls_crypto_digest_st _gnutls_sha_padlock_oneshot = {
.init = NULL,
@@ -387,4 +383,4 @@ const gnutls_crypto_digest_st _gnutls_sha_padlock = {
.fast = wrap_padlock_hash_fast,
};
-#endif /* HAVE_LIBNETTLE */
+#endif /* HAVE_LIBNETTLE */
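
Editor's note: the wrap_padlock_hash_fast hunks seed the raw chaining words with the standard SHA initial values, run the PadLock one-shot over the input, and serialize the state big-endian. A reduced sketch of the serialization step (whole words only; the real _nettle_write_be32 also handles a ragged tail):

	#include <stdint.h>

	static void write_be32(unsigned length, uint8_t *dst,
			       const uint32_t *src)
	{
		unsigned words = length / 4;
		unsigned i;

		for (i = 0; i < words; i++, dst += 4) {
			uint32_t w = src[i];

			dst[0] = (w >> 24) & 0xff;
			dst[1] = (w >> 16) & 0xff;
			dst[2] = (w >> 8) & 0xff;
			dst[3] = w & 0xff;
		}
	}
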
diff --git a/lib/accelerated/x86/sha-padlock.h b/lib/accelerated/x86/sha-padlock.h
index fe5b9e4185..990c02d989 100644
--- a/lib/accelerated/x86/sha-padlock.h
+++ b/lib/accelerated/x86/sha-padlock.h
@@ -1,7 +1,7 @@
#ifndef GNUTLS_LIB_ACCELERATED_X86_SHA_PADLOCK_H
-# define GNUTLS_LIB_ACCELERATED_X86_SHA_PADLOCK_H
+#define GNUTLS_LIB_ACCELERATED_X86_SHA_PADLOCK_H
-# include <nettle/sha.h>
+#include <nettle/sha.h>
void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
@@ -10,15 +10,15 @@ void padlock_sha1_blocks(unsigned int *ctx, const void *inp, size_t blocks);
void padlock_sha256_blocks(unsigned int *ctx, const void *inp, size_t blocks);
void padlock_sha512_blocks(unsigned int *ctx, const void *inp, size_t blocks);
-int wrap_padlock_hash_fast(gnutls_digest_algorithm_t algo,
- const void *text, size_t text_size, void *digest);
+int wrap_padlock_hash_fast(gnutls_digest_algorithm_t algo, const void *text,
+ size_t text_size, void *digest);
-void padlock_sha1_update(struct sha1_ctx *ctx,
- size_t length, const uint8_t * data);
-void padlock_sha256_update(struct sha256_ctx *ctx,
- size_t length, const uint8_t * data);
-void padlock_sha512_update(struct sha512_ctx *ctx,
- size_t length, const uint8_t * data);
+void padlock_sha1_update(struct sha1_ctx *ctx, size_t length,
+ const uint8_t *data);
+void padlock_sha256_update(struct sha256_ctx *ctx, size_t length,
+ const uint8_t *data);
+void padlock_sha512_update(struct sha512_ctx *ctx, size_t length,
+ const uint8_t *data);
extern const struct nettle_hash padlock_sha1;
extern const struct nettle_hash padlock_sha224;
@@ -29,4 +29,4 @@ extern const struct nettle_hash padlock_sha512;
extern const gnutls_crypto_mac_st _gnutls_hmac_sha_padlock;
extern const gnutls_crypto_digest_st _gnutls_sha_padlock;
-#endif /* GNUTLS_LIB_ACCELERATED_X86_SHA_PADLOCK_H */
+#endif /* GNUTLS_LIB_ACCELERATED_X86_SHA_PADLOCK_H */
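
Editor's note: the padlock_*_blocks() primitives declared above take a raw state pointer and a count of whole blocks. A sketch of how such a primitive is typically consumed, with the tail left to the caller's buffering layer (names are illustrative, and a 64-byte block is assumed):

	#include <stddef.h>
	#include <stdint.h>

	typedef void (*sha_blocks_fn)(unsigned int *state, const void *inp,
				      size_t blocks);

	/* Feed only whole 64-byte blocks; return the residue length the
	 * caller must keep buffered until more data arrives. */
	static size_t feed_blocks(sha_blocks_fn compress, unsigned int *state,
				  const uint8_t *data, size_t len)
	{
		size_t blocks = len / 64;

		if (blocks > 0)
			compress(state, data, blocks);
		return len - blocks * 64;
	}
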
diff --git a/lib/accelerated/x86/sha-x86-ssse3.c b/lib/accelerated/x86/sha-x86-ssse3.c
index bcfcc4d337..5f099d5cb0 100644
--- a/lib/accelerated/x86/sha-x86-ssse3.c
+++ b/lib/accelerated/x86/sha-x86-ssse3.c
@@ -70,7 +70,7 @@ static void wrap_x86_hash_deinit(void *hd)
gnutls_free(hd);
}
-void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t * data)
+void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t *data)
{
struct {
uint32_t h0, h1, h2, h3, h4;
@@ -103,7 +103,6 @@ void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t * data)
length -= res;
if (length > 0) {
-
t2 = length / SHA1_DATA_SIZE;
sha1_block_data_order(&octx, data, t2);
@@ -125,11 +124,10 @@ void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t * data)
if (res > 0) {
sha1_update(ctx, res, data);
}
-
}
void x86_sha256_update(struct sha256_ctx *ctx, size_t length,
- const uint8_t * data)
+ const uint8_t *data)
{
struct {
uint32_t h[8];
@@ -177,7 +175,7 @@ void x86_sha256_update(struct sha256_ctx *ctx, size_t length,
}
void x86_sha512_update(struct sha512_ctx *ctx, size_t length,
- const uint8_t * data)
+ const uint8_t *data)
{
struct {
uint64_t h[8];
@@ -232,41 +230,41 @@ static int _ctx_init(gnutls_digest_algorithm_t algo, struct x86_hash_ctx *ctx)
switch (algo) {
case GNUTLS_DIG_SHA1:
sha1_init(&ctx->ctx.sha1);
- ctx->update = (update_func) x86_sha1_update;
- ctx->digest = (digest_func) sha1_digest;
- ctx->init = (init_func) sha1_init;
+ ctx->update = (update_func)x86_sha1_update;
+ ctx->digest = (digest_func)sha1_digest;
+ ctx->init = (init_func)sha1_init;
ctx->ctx_ptr = &ctx->ctx.sha1;
ctx->length = SHA1_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA224:
sha224_init(&ctx->ctx.sha224);
- ctx->update = (update_func) x86_sha256_update;
- ctx->digest = (digest_func) sha224_digest;
- ctx->init = (init_func) sha224_init;
+ ctx->update = (update_func)x86_sha256_update;
+ ctx->digest = (digest_func)sha224_digest;
+ ctx->init = (init_func)sha224_init;
ctx->ctx_ptr = &ctx->ctx.sha224;
ctx->length = SHA224_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA256:
sha256_init(&ctx->ctx.sha256);
- ctx->update = (update_func) x86_sha256_update;
- ctx->digest = (digest_func) sha256_digest;
- ctx->init = (init_func) sha256_init;
+ ctx->update = (update_func)x86_sha256_update;
+ ctx->digest = (digest_func)sha256_digest;
+ ctx->init = (init_func)sha256_init;
ctx->ctx_ptr = &ctx->ctx.sha256;
ctx->length = SHA256_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA384:
sha384_init(&ctx->ctx.sha384);
- ctx->update = (update_func) x86_sha512_update;
- ctx->digest = (digest_func) sha384_digest;
- ctx->init = (init_func) sha384_init;
+ ctx->update = (update_func)x86_sha512_update;
+ ctx->digest = (digest_func)sha384_digest;
+ ctx->init = (init_func)sha384_init;
ctx->ctx_ptr = &ctx->ctx.sha384;
ctx->length = SHA384_DIGEST_SIZE;
break;
case GNUTLS_DIG_SHA512:
sha512_init(&ctx->ctx.sha512);
- ctx->update = (update_func) x86_sha512_update;
- ctx->digest = (digest_func) sha512_digest;
- ctx->init = (init_func) sha512_init;
+ ctx->update = (update_func)x86_sha512_update;
+ ctx->digest = (digest_func)sha512_digest;
+ ctx->init = (init_func)sha512_init;
ctx->ctx_ptr = &ctx->ctx.sha512;
ctx->length = SHA512_DIGEST_SIZE;
break;
@@ -305,7 +303,7 @@ static void *wrap_x86_hash_copy(const void *_ctx)
{
struct x86_hash_ctx *new_ctx;
const struct x86_hash_ctx *ctx = _ctx;
- ptrdiff_t off = (uint8_t *) ctx->ctx_ptr - (uint8_t *) (&ctx->ctx);
+ ptrdiff_t off = (uint8_t *)ctx->ctx_ptr - (uint8_t *)(&ctx->ctx);
new_ctx = gnutls_malloc(sizeof(struct x86_hash_ctx));
if (new_ctx == NULL) {
@@ -314,7 +312,7 @@ static void *wrap_x86_hash_copy(const void *_ctx)
}
memcpy(new_ctx, ctx, sizeof(*new_ctx));
- new_ctx->ctx_ptr = (uint8_t *) & new_ctx->ctx + off;
+ new_ctx->ctx_ptr = (uint8_t *)&new_ctx->ctx + off;
return new_ctx;
}
@@ -332,8 +330,8 @@ static int wrap_x86_hash_output(void *src_ctx, void *digest, size_t digestsize)
return 0;
}
-static int wrap_x86_hash_fast(gnutls_digest_algorithm_t algo,
- const void *text, size_t text_size, void *digest)
+static int wrap_x86_hash_fast(gnutls_digest_algorithm_t algo, const void *text,
+ size_t text_size, void *digest)
{
struct x86_hash_ctx ctx;
int ret;
@@ -349,16 +347,16 @@ static int wrap_x86_hash_fast(gnutls_digest_algorithm_t algo,
}
const struct nettle_hash x86_sha1 =
-NN_HASH(sha1, x86_sha1_update, sha1_digest, SHA1);
+ NN_HASH(sha1, x86_sha1_update, sha1_digest, SHA1);
const struct nettle_hash x86_sha224 =
-NN_HASH(sha224, x86_sha256_update, sha224_digest, SHA224);
+ NN_HASH(sha224, x86_sha256_update, sha224_digest, SHA224);
const struct nettle_hash x86_sha256 =
-NN_HASH(sha256, x86_sha256_update, sha256_digest, SHA256);
+ NN_HASH(sha256, x86_sha256_update, sha256_digest, SHA256);
const struct nettle_hash x86_sha384 =
-NN_HASH(sha384, x86_sha512_update, sha384_digest, SHA384);
+ NN_HASH(sha384, x86_sha512_update, sha384_digest, SHA384);
const struct nettle_hash x86_sha512 =
-NN_HASH(sha512, x86_sha512_update, sha512_digest, SHA512);
+ NN_HASH(sha512, x86_sha512_update, sha512_digest, SHA512);
const gnutls_crypto_digest_st _gnutls_sha_x86_ssse3 = {
.init = wrap_x86_hash_init,
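
Editor's note: the x86_sha*_update functions above all mirror the nettle context into the layout the assembly routine expects, push whole blocks through it, and copy the chaining state back. A sketch of that round-trip, under the assumption that sha1_block_data_order consumes a five-word state followed by two counters (the octx layout here is assumed, not authoritative):

	#include <nettle/sha.h>
	#include <string.h>

	void sha1_block_data_order(void *ctx, const void *inp, size_t blocks);

	static void update_whole_blocks(struct sha1_ctx *ctx,
					const uint8_t *data, size_t blocks)
	{
		struct {
			uint32_t h0, h1, h2, h3, h4;
			uint32_t Nl, Nh;
		} octx;

		/* Copy the five chaining words in, run the asm, copy out. */
		memcpy(&octx, ctx->state, sizeof(uint32_t) * 5);
		sha1_block_data_order(&octx, data, blocks);
		memcpy(ctx->state, &octx, sizeof(uint32_t) * 5);
		ctx->count += blocks; /* block counter stays in the nettle ctx */
	}
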
diff --git a/lib/accelerated/x86/sha-x86.h b/lib/accelerated/x86/sha-x86.h
index f1fcea055b..d96936663f 100644
--- a/lib/accelerated/x86/sha-x86.h
+++ b/lib/accelerated/x86/sha-x86.h
@@ -1,7 +1,7 @@
#ifndef GNUTLS_LIB_ACCELERATED_X86_SHA_X86_H
-# define GNUTLS_LIB_ACCELERATED_X86_SHA_X86_H
+#define GNUTLS_LIB_ACCELERATED_X86_SHA_X86_H
-# include <nettle/sha.h>
+#include <nettle/sha.h>
extern const struct nettle_hash x86_sha1;
extern const struct nettle_hash x86_sha224;
@@ -9,13 +9,13 @@ extern const struct nettle_hash x86_sha256;
extern const struct nettle_hash x86_sha384;
extern const struct nettle_hash x86_sha512;
-void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t * data);
+void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t *data);
void x86_sha256_update(struct sha256_ctx *ctx, size_t length,
- const uint8_t * data);
+ const uint8_t *data);
void x86_sha512_update(struct sha512_ctx *ctx, size_t length,
- const uint8_t * data);
+ const uint8_t *data);
extern const gnutls_crypto_digest_st _gnutls_sha_x86_ssse3;
extern const gnutls_crypto_mac_st _gnutls_hmac_sha_x86_ssse3;
-#endif /* GNUTLS_LIB_ACCELERATED_X86_SHA_X86_H */
+#endif /* GNUTLS_LIB_ACCELERATED_X86_SHA_X86_H */
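
Editor's note: the x86_sha* objects declared above are ordinary nettle_hash vtables, so any consumer can drive them generically. A self-contained one-shot helper over such a vtable:

	#include <nettle/nettle-meta.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* out must provide h->digest_size bytes. */
	static int hash_oneshot(const struct nettle_hash *h, const void *msg,
				size_t len, uint8_t *out)
	{
		void *ctx = malloc(h->context_size);

		if (ctx == NULL)
			return -1;
		h->init(ctx);
		h->update(ctx, len, msg);
		h->digest(ctx, h->digest_size, out);
		free(ctx);
		return 0;
	}
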
diff --git a/lib/accelerated/x86/x86-common.c b/lib/accelerated/x86/x86-common.c
index 24744c41cb..a7047a7ba1 100644
--- a/lib/accelerated/x86/x86-common.c
+++ b/lib/accelerated/x86/x86-common.c
@@ -34,15 +34,15 @@
#include <sha-x86.h>
#include <x86-common.h>
#ifdef HAVE_LIBNETTLE
-# include <nettle/aes.h> /* for key generation in 192 and 256 bits */
-# include <sha-padlock.h>
+#include <nettle/aes.h> /* for key generation in 192 and 256 bits */
+#include <sha-padlock.h>
#endif
#include <aes-padlock.h>
#ifdef HAVE_CPUID_H
-# include <cpuid.h>
+#include <cpuid.h>
#else
-# define __get_cpuid(...) 0
-# define __get_cpuid_count(...) 0
+#define __get_cpuid(...) 0
+#define __get_cpuid_count(...) 0
#endif
/* ebx, ecx, edx
@@ -56,57 +56,57 @@ __hidden
unsigned int GNUTLS_x86_cpuid_s[4];
#ifndef bit_SHA
-# define bit_SHA (1<<29)
+#define bit_SHA (1 << 29)
#endif
/* ecx */
#ifndef bit_AVX512BITALG
-# define bit_AVX512BITALG 0x4000
+#define bit_AVX512BITALG 0x4000
#endif
#ifndef bit_PCLMUL
-# define bit_PCLMUL 0x2
+#define bit_PCLMUL 0x2
#endif
#ifndef bit_SSSE3
/* ecx */
-# define bit_SSSE3 0x0000200
+#define bit_SSSE3 0x0000200
#endif
#ifndef bit_AES
-# define bit_AES 0x2000000
+#define bit_AES 0x2000000
#endif
#ifndef bit_AVX
-# define bit_AVX 0x10000000
+#define bit_AVX 0x10000000
#endif
#ifndef bit_AVX2
-# define bit_AVX2 0x00000020
+#define bit_AVX2 0x00000020
#endif
#ifndef bit_AVX512F
-# define bit_AVX512F 0x00010000
+#define bit_AVX512F 0x00010000
#endif
#ifndef bit_AVX512IFMA
-# define bit_AVX512IFMA 0x00200000
+#define bit_AVX512IFMA 0x00200000
#endif
#ifndef bit_AVX512BW
-# define bit_AVX512BW 0x40000000
+#define bit_AVX512BW 0x40000000
#endif
#ifndef bit_AVX512VL
-# define bit_AVX512VL 0x80000000
+#define bit_AVX512VL 0x80000000
#endif
#ifndef bit_OSXSAVE
-# define bit_OSXSAVE 0x8000000
+#define bit_OSXSAVE 0x8000000
#endif
#ifndef bit_MOVBE
-# define bit_MOVBE 0x00400000
+#define bit_MOVBE 0x00400000
#endif
#define bit_PADLOCK (0x3 << 6)
@@ -116,19 +116,18 @@ unsigned int GNUTLS_x86_cpuid_s[4];
/* Our internal bit-string for cpu capabilities. Should be set
* in GNUTLS_CPUID_OVERRIDE */
#define EMPTY_SET 1
-#define INTEL_AES_NI (1<<1)
-#define INTEL_SSSE3 (1<<2)
-#define INTEL_PCLMUL (1<<3)
-#define INTEL_AVX (1<<4)
-#define INTEL_SHA (1<<5)
-#define PADLOCK (1<<20)
-#define PADLOCK_PHE (1<<21)
-#define PADLOCK_PHE_SHA512 (1<<22)
+#define INTEL_AES_NI (1 << 1)
+#define INTEL_SSSE3 (1 << 2)
+#define INTEL_PCLMUL (1 << 3)
+#define INTEL_AVX (1 << 4)
+#define INTEL_SHA (1 << 5)
+#define PADLOCK (1 << 20)
+#define PADLOCK_PHE (1 << 21)
+#define PADLOCK_PHE_SHA512 (1 << 22)
#ifndef HAVE_GET_CPUID_COUNT
-static inline void
-get_cpuid_level7(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
+static inline void get_cpuid_level7(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
{
/* we avoid using __get_cpuid_count, because it is not available with gcc 4.8 */
if (__get_cpuid_max(7, 0) < 7)
@@ -138,7 +137,7 @@ get_cpuid_level7(unsigned int *eax, unsigned int *ebx,
return;
}
#else
-# define get_cpuid_level7(a,b,c,d) __get_cpuid_count(7, 0, a, b, c, d)
+#define get_cpuid_level7(a, b, c, d) __get_cpuid_count(7, 0, a, b, c, d)
#endif
static unsigned read_cpuid_vals(unsigned int vals[4])
@@ -170,7 +169,7 @@ static unsigned check_4th_gen_intel_features(unsigned ecx)
#if defined(_MSC_VER) && !defined(__clang__)
xcr0 = _xgetbv(0);
#else
- __asm__("xgetbv": "=a"(xcr0): "c"(0):"%edx");
+ __asm__("xgetbv" : "=a"(xcr0) : "c"(0) : "%edx");
#endif
/* Check if xmm and ymm state are enabled in XCR0. */
return (xcr0 & 6) == 6;
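
Editor's note: the xgetbv read above asks the OS, not the CPU, whether XMM (bit 1) and YMM (bit 2) register state is saved on context switch, hence the mask 6. A standalone version; executing xgetbv is only safe after CPUID has reported OSXSAVE, which the surrounding ecx check is responsible for:

	#include <stdint.h>

	/* Precondition: CPUID.1:ECX.OSXSAVE is set. */
	static unsigned os_supports_avx_state(void)
	{
		uint32_t eax, edx;

		__asm__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
		return (eax & 6) == 6; /* SSE and AVX state both enabled */
	}
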
@@ -191,8 +190,8 @@ static void capabilities_to_intel_cpuid(unsigned capabilities)
if (a[1] & bit_AES) {
GNUTLS_x86_cpuid_s[1] |= bit_AES;
} else {
- _gnutls_debug_log
- ("AESNI acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "AESNI acceleration requested but not available\n");
}
}
@@ -200,8 +199,8 @@ static void capabilities_to_intel_cpuid(unsigned capabilities)
if (a[1] & bit_SSSE3) {
GNUTLS_x86_cpuid_s[1] |= bit_SSSE3;
} else {
- _gnutls_debug_log
- ("SSSE3 acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "SSSE3 acceleration requested but not available\n");
}
}
@@ -210,8 +209,8 @@ static void capabilities_to_intel_cpuid(unsigned capabilities)
check_4th_gen_intel_features(a[1])) {
GNUTLS_x86_cpuid_s[1] |= bit_AVX | bit_MOVBE;
} else {
- _gnutls_debug_log
- ("AVX acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "AVX acceleration requested but not available\n");
}
}
@@ -219,8 +218,8 @@ static void capabilities_to_intel_cpuid(unsigned capabilities)
if (a[1] & bit_PCLMUL) {
GNUTLS_x86_cpuid_s[1] |= bit_PCLMUL;
} else {
- _gnutls_debug_log
- ("PCLMUL acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "PCLMUL acceleration requested but not available\n");
}
}
@@ -228,8 +227,8 @@ static void capabilities_to_intel_cpuid(unsigned capabilities)
if (a[2] & bit_SHA) {
GNUTLS_x86_cpuid_s[2] |= bit_SHA;
} else {
- _gnutls_debug_log
- ("SHA acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "SHA acceleration requested but not available\n");
}
}
}
@@ -253,7 +252,7 @@ static unsigned check_sha(void)
static unsigned check_avx_movbe(void)
{
return (GNUTLS_x86_cpuid_s[1] & (bit_AVX | bit_MOVBE)) ==
- (bit_AVX | bit_MOVBE);
+ (bit_AVX | bit_MOVBE);
}
static unsigned check_pclmul(void)
@@ -277,8 +276,8 @@ static unsigned capabilities_to_zhaoxin_edx(unsigned capabilities)
if (c & bit_PADLOCK) {
GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK;
} else {
- _gnutls_debug_log
- ("Padlock acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "Padlock acceleration requested but not available\n");
}
}
@@ -286,8 +285,8 @@ static unsigned capabilities_to_zhaoxin_edx(unsigned capabilities)
if (c & bit_PADLOCK_PHE) {
GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK_PHE;
} else {
- _gnutls_debug_log
- ("Padlock-PHE acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "Padlock-PHE acceleration requested but not available\n");
}
}
@@ -295,8 +294,8 @@ static unsigned capabilities_to_zhaoxin_edx(unsigned capabilities)
if (c & bit_PADLOCK_PHE_SHA512) {
GNUTLS_x86_cpuid_s[2] |= bit_PADLOCK_PHE_SHA512;
} else {
- _gnutls_debug_log
- ("Padlock-PHE-SHA512 acceleration requested but not available\n");
+ _gnutls_debug_log(
+ "Padlock-PHE-SHA512 acceleration requested but not available\n");
}
}
@@ -340,12 +339,10 @@ static int check_fast_pclmul(void)
static int check_phe_partial(void)
{
- const char text[64] =
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- uint32_t iv[5] = { 0x67452301UL, 0xEFCDAB89UL,
- 0x98BADCFEUL, 0x10325476UL, 0xC3D2E1F0UL
- };
+ const char text[64] = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ uint32_t iv[5] = { 0x67452301UL, 0xEFCDAB89UL, 0x98BADCFEUL,
+ 0x10325476UL, 0xC3D2E1F0UL };
/* If EAX is set to -1 (this is the case with padlock_sha1_blocks), the
* xsha1 instruction takes a complete SHA-1 block (64 bytes), while it
@@ -368,19 +365,17 @@ static unsigned check_zhaoxin(void)
return 0;
/* Zhaoxin and VIA CPU was detected */
- if ((memcmp(&b, "Cent", 4) == 0 &&
- memcmp(&d, "aurH", 4) == 0 &&
+ if ((memcmp(&b, "Cent", 4) == 0 && memcmp(&d, "aurH", 4) == 0 &&
memcmp(&c, "auls", 4) == 0) ||
- (memcmp(&b, " Sh", 4) == 0 &&
- memcmp(&d, "angh", 4) == 0 && memcmp(&c, "ai ", 4) == 0)) {
+ (memcmp(&b, " Sh", 4) == 0 && memcmp(&d, "angh", 4) == 0 &&
+ memcmp(&c, "ai ", 4) == 0)) {
return 1;
}
return 0;
}
-static
-void register_x86_padlock_crypto(unsigned capabilities)
+static void register_x86_padlock_crypto(unsigned capabilities)
{
int ret, phe;
unsigned edx;
@@ -402,47 +397,41 @@ void register_x86_padlock_crypto(unsigned capabilities)
if (check_ssse3()) {
_gnutls_debug_log("Zhaoxin SSSE3 was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 90,
- &_gnutls_aes_gcm_x86_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 90,
+ &_gnutls_aes_gcm_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 90,
- &_gnutls_aes_gcm_x86_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 90,
+ &_gnutls_aes_gcm_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 90,
- &_gnutls_aes_gcm_x86_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 90,
+ &_gnutls_aes_gcm_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
@@ -452,239 +441,191 @@ void register_x86_padlock_crypto(unsigned capabilities)
if (check_sha())
_gnutls_debug_log("Zhaoxin SHA was detected\n");
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA1, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA224, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA256, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA224,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA224, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA384,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA384, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA512,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA512, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA384,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA384, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA512,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA512, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
}
if (check_optimized_aes()) {
_gnutls_debug_log("Zhaoxin AES accelerator was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CCM, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CCM, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CCM, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CCM, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CCM_8, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CCM_8, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CCM_8, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CCM_8, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_XTS, 80,
- &_gnutls_aes_xts_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_XTS, 80,
+ &_gnutls_aes_xts_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_XTS, 80,
- &_gnutls_aes_xts_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_XTS, 80,
+ &_gnutls_aes_xts_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
-# ifdef ASM_X86_64
+#ifdef ASM_X86_64
if (check_pclmul()) {
/* register GCM ciphers */
- _gnutls_debug_log
- ("Zhaoxin GCM accelerator was detected\n");
+ _gnutls_debug_log(
+ "Zhaoxin GCM accelerator was detected\n");
if (check_avx_movbe() && check_fast_pclmul()) {
- _gnutls_debug_log
- ("Zhaoxin GCM accelerator (AVX) was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 80,
- &_gnutls_aes_gcm_pclmul_avx, 0);
+ _gnutls_debug_log(
+ "Zhaoxin GCM accelerator (AVX) was detected\n");
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 80,
+ &_gnutls_aes_gcm_pclmul_avx, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 80,
- &_gnutls_aes_gcm_pclmul_avx, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 80,
+ &_gnutls_aes_gcm_pclmul_avx, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 80,
- &_gnutls_aes_gcm_pclmul_avx, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 80,
+ &_gnutls_aes_gcm_pclmul_avx, 0);
if (ret < 0) {
gnutls_assert();
}
} else {
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 80,
- &_gnutls_aes_gcm_pclmul, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 80,
+ &_gnutls_aes_gcm_pclmul, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 80,
- &_gnutls_aes_gcm_pclmul, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 80,
+ &_gnutls_aes_gcm_pclmul, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 80,
- &_gnutls_aes_gcm_pclmul, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 80,
+ &_gnutls_aes_gcm_pclmul, 0);
if (ret < 0) {
gnutls_assert();
}
}
} else
-# endif
+#endif
{
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 80,
- &_gnutls_aes_gcm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 80,
+ &_gnutls_aes_gcm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 80,
- &_gnutls_aes_gcm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 80,
+ &_gnutls_aes_gcm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 80,
- &_gnutls_aes_gcm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 80,
+ &_gnutls_aes_gcm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
@@ -693,40 +634,35 @@ void register_x86_padlock_crypto(unsigned capabilities)
if (check_padlock(edx)) {
_gnutls_debug_log("Padlock AES accelerator was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
/* register GCM ciphers */
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 90,
- &_gnutls_aes_gcm_padlock, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 90, &_gnutls_aes_gcm_padlock,
+ 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 90,
- &_gnutls_aes_gcm_padlock, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 90, &_gnutls_aes_gcm_padlock,
+ 0);
if (ret < 0) {
gnutls_assert();
}
@@ -735,134 +671,105 @@ void register_x86_padlock_crypto(unsigned capabilities)
if (!check_optimized_aes() && !check_padlock(edx))
_gnutls_priority_update_non_aesni();
-# ifdef HAVE_LIBNETTLE
+#ifdef HAVE_LIBNETTLE
phe = check_phe(edx);
if (phe && check_phe_partial()) {
- _gnutls_debug_log
- ("Padlock SHA1 and SHA256 (partial) accelerator was detected\n");
+ _gnutls_debug_log(
+ "Padlock SHA1 and SHA256 (partial) accelerator was detected\n");
if (check_phe_sha512(edx)) {
- _gnutls_debug_log
- ("Padlock SHA512 (partial) accelerator was detected\n");
- ret =
- gnutls_crypto_single_digest_register
- (GNUTLS_DIG_SHA384, 80, &_gnutls_sha_padlock, 0);
+ _gnutls_debug_log(
+ "Padlock SHA512 (partial) accelerator was detected\n");
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA384, 80, &_gnutls_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register
- (GNUTLS_DIG_SHA512, 80, &_gnutls_sha_padlock, 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA512, 80, &_gnutls_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register
- (GNUTLS_MAC_SHA384, 80,
- &_gnutls_hmac_sha_padlock, 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA384, 80,
+ &_gnutls_hmac_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register
- (GNUTLS_MAC_SHA512, 80,
- &_gnutls_hmac_sha_padlock, 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA512, 80,
+ &_gnutls_hmac_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
- 90,
- &_gnutls_sha_padlock,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA1, 90, &_gnutls_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224,
- 90,
- &_gnutls_sha_padlock,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA224, 90, &_gnutls_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
- 90,
- &_gnutls_sha_padlock,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA256, 90, &_gnutls_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
- 90,
- &_gnutls_hmac_sha_padlock,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA1, 90, &_gnutls_hmac_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
/* we don't register MAC_SHA224 because it is not used by TLS */
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
- 90,
- &_gnutls_hmac_sha_padlock,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA256, 90, &_gnutls_hmac_sha_padlock, 0);
if (ret < 0) {
gnutls_assert();
}
} else if (phe) {
/* Original padlock PHE. Does not support incremental operations.
*/
- _gnutls_debug_log
- ("Padlock SHA1 and SHA256 accelerator was detected\n");
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
- 90,
- &_gnutls_sha_padlock_oneshot,
- 0);
+ _gnutls_debug_log(
+ "Padlock SHA1 and SHA256 accelerator was detected\n");
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA1, 90, &_gnutls_sha_padlock_oneshot, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
- 90,
- &_gnutls_sha_padlock_oneshot,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA256, 90, &_gnutls_sha_padlock_oneshot, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
- 90,
- &_gnutls_hmac_sha_padlock_oneshot,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA1, 90, &_gnutls_hmac_sha_padlock_oneshot,
+ 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
- 90,
- &_gnutls_hmac_sha_padlock_oneshot,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA256, 90,
+ &_gnutls_hmac_sha_padlock_oneshot, 0);
if (ret < 0) {
gnutls_assert();
}
}
-# endif
+#endif
return;
}
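
Editor's note: every registration above follows the same call shape, where the integer argument is a priority and the lower value wins (the AES-NI paths register at 80 precisely so they outrank the SSSE3 fallback at 90). A sketch of how the GCM triples could be folded into a loop; register_gcm is hypothetical, while the registration call and error idiom are the ones used in this file:

	static void register_gcm(const gnutls_crypto_cipher_st *impl, int prio)
	{
		static const gnutls_cipher_algorithm_t algos[] = {
			GNUTLS_CIPHER_AES_128_GCM,
			GNUTLS_CIPHER_AES_192_GCM,
			GNUTLS_CIPHER_AES_256_GCM,
		};
		size_t i;

		for (i = 0; i < sizeof(algos) / sizeof(algos[0]); i++) {
			if (gnutls_crypto_single_cipher_register(
				    algos[i], prio, impl, 0) < 0)
				gnutls_assert();
		}
	}
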
@@ -882,21 +789,20 @@ static enum x86_cpu_vendor check_x86_cpu_vendor(void)
return X86_CPU_VENDOR_OTHER;
}
- if (memcmp(&b, "Genu", 4) == 0 &&
- memcmp(&d, "ineI", 4) == 0 && memcmp(&c, "ntel", 4) == 0) {
+ if (memcmp(&b, "Genu", 4) == 0 && memcmp(&d, "ineI", 4) == 0 &&
+ memcmp(&c, "ntel", 4) == 0) {
return X86_CPU_VENDOR_INTEL;
}
- if (memcmp(&b, "Auth", 4) == 0 &&
- memcmp(&d, "enti", 4) == 0 && memcmp(&c, "cAMD", 4) == 0) {
+ if (memcmp(&b, "Auth", 4) == 0 && memcmp(&d, "enti", 4) == 0 &&
+ memcmp(&c, "cAMD", 4) == 0) {
return X86_CPU_VENDOR_AMD;
}
return X86_CPU_VENDOR_OTHER;
}
-static
-void register_x86_intel_crypto(unsigned capabilities)
+static void register_x86_intel_crypto(unsigned capabilities)
{
int ret;
enum x86_cpu_vendor vendor;
@@ -918,10 +824,9 @@ void register_x86_intel_crypto(unsigned capabilities)
* OpenSSL does. Should we clear
* bit_AVX512DQ, bit_AVX512PF, bit_AVX512ER,
* and bit_AVX512CD? */
- GNUTLS_x86_cpuid_s[2] &= ~(bit_AVX2 |
- bit_AVX512F |
- bit_AVX512IFMA |
- bit_AVX512BW | bit_AVX512BW);
+ GNUTLS_x86_cpuid_s[2] &=
+ ~(bit_AVX2 | bit_AVX512F | bit_AVX512IFMA |
+			  bit_AVX512BW | bit_AVX512VL);
}
} else {
capabilities_to_intel_cpuid(capabilities);
@@ -937,47 +842,41 @@ void register_x86_intel_crypto(unsigned capabilities)
if (check_ssse3()) {
_gnutls_debug_log("Intel SSSE3 was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 90,
- &_gnutls_aes_gcm_x86_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 90,
+ &_gnutls_aes_gcm_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 90,
- &_gnutls_aes_gcm_x86_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 90,
+ &_gnutls_aes_gcm_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 90,
- &_gnutls_aes_gcm_x86_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 90,
+ &_gnutls_aes_gcm_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
@@ -987,156 +886,117 @@ void register_x86_intel_crypto(unsigned capabilities)
if (check_sha())
_gnutls_debug_log("Intel SHA was detected\n");
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA1, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA224, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA256, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA1, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA224,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA224, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA256, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA384,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA384, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA512,
- 80,
- &_gnutls_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_digest_register(
+ GNUTLS_DIG_SHA512, 80, &_gnutls_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA384,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA384, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
- ret =
- gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA512,
- 80,
- &_gnutls_hmac_sha_x86_ssse3,
- 0);
+ ret = gnutls_crypto_single_mac_register(
+ GNUTLS_MAC_SHA512, 80, &_gnutls_hmac_sha_x86_ssse3, 0);
if (ret < 0)
gnutls_assert();
}
if (check_optimized_aes()) {
_gnutls_debug_log("Intel AES accelerator was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CCM, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CCM, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CCM, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CCM, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_CCM_8, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_CCM_8, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_CCM_8, 80,
- &_gnutls_aes_ccm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_CCM_8, 80,
+ &_gnutls_aes_ccm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_XTS, 80,
- &_gnutls_aes_xts_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_XTS, 80,
+ &_gnutls_aes_xts_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_XTS, 80,
- &_gnutls_aes_xts_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_XTS, 80,
+ &_gnutls_aes_xts_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
@@ -1145,54 +1005,48 @@ void register_x86_intel_crypto(unsigned capabilities)
if (check_pclmul()) {
/* register GCM ciphers */
if (check_avx_movbe()) {
- _gnutls_debug_log
- ("Intel GCM accelerator (AVX) was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 80,
- &_gnutls_aes_gcm_pclmul_avx, 0);
+ _gnutls_debug_log(
+ "Intel GCM accelerator (AVX) was detected\n");
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 80,
+ &_gnutls_aes_gcm_pclmul_avx, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 80,
- &_gnutls_aes_gcm_pclmul_avx, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 80,
+ &_gnutls_aes_gcm_pclmul_avx, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 80,
- &_gnutls_aes_gcm_pclmul_avx, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 80,
+ &_gnutls_aes_gcm_pclmul_avx, 0);
if (ret < 0) {
gnutls_assert();
}
} else {
- _gnutls_debug_log
- ("Intel GCM accelerator was detected\n");
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 80,
- &_gnutls_aes_gcm_pclmul, 0);
+ _gnutls_debug_log(
+ "Intel GCM accelerator was detected\n");
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 80,
+ &_gnutls_aes_gcm_pclmul, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 80,
- &_gnutls_aes_gcm_pclmul, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 80,
+ &_gnutls_aes_gcm_pclmul, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 80,
- &_gnutls_aes_gcm_pclmul, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 80,
+ &_gnutls_aes_gcm_pclmul, 0);
if (ret < 0) {
gnutls_assert();
}
@@ -1200,26 +1054,23 @@ void register_x86_intel_crypto(unsigned capabilities)
} else
#endif
{
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_128_GCM, 80,
- &_gnutls_aes_gcm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_128_GCM, 80,
+ &_gnutls_aes_gcm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_192_GCM, 80,
- &_gnutls_aes_gcm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_192_GCM, 80,
+ &_gnutls_aes_gcm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
- ret =
- gnutls_crypto_single_cipher_register
- (GNUTLS_CIPHER_AES_256_GCM, 80,
- &_gnutls_aes_gcm_x86_aesni, 0);
+ ret = gnutls_crypto_single_cipher_register(
+ GNUTLS_CIPHER_AES_256_GCM, 80,
+ &_gnutls_aes_gcm_x86_aesni, 0);
if (ret < 0) {
gnutls_assert();
}
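
Editor's note: the detection logic in this file reads CPUID leaf 1 (ecx/edx) and leaf 7 subleaf 0 (ebx/ecx) into GNUTLS_x86_cpuid_s, then tests bits such as bit_AES (CPUID.1:ECX[25]). A minimal standalone probe using the same <cpuid.h> helper the file falls back on:

	#ifdef HAVE_CPUID_H
	#include <cpuid.h>
	#endif

	static unsigned cpu_has_aesni(void)
	{
		unsigned int a, b, c, d;

		/* Leaf 1: feature flags; returns 0 if the leaf is absent. */
		if (!__get_cpuid(1, &a, &b, &c, &d))
			return 0;
		return (c & (1u << 25)) != 0; /* bit_AES */
	}
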
diff --git a/lib/accelerated/x86/x86-common.h b/lib/accelerated/x86/x86-common.h
index 8d6d14f4af..97d984677c 100644
--- a/lib/accelerated/x86/x86-common.h
+++ b/lib/accelerated/x86/x86-common.h
@@ -20,29 +20,28 @@
*
*/
#ifndef GNUTLS_LIB_ACCELERATED_X86_X86_COMMON_H
-# define GNUTLS_LIB_ACCELERATED_X86_X86_COMMON_H
+#define GNUTLS_LIB_ACCELERATED_X86_X86_COMMON_H
-# include <config.h>
+#include <config.h>
-# if defined(ASM_X86)
+#if defined(ASM_X86)
void gnutls_cpuid(unsigned int func, unsigned int *ax, unsigned int *bx,
unsigned int *cx, unsigned int *dx);
-# endif
+#endif
-# define CHECK_AES_KEYSIZE(s) \
+#define CHECK_AES_KEYSIZE(s) \
if (s != 16 && s != 24 && s != 32) \
- return GNUTLS_E_INVALID_REQUEST
+ return GNUTLS_E_INVALID_REQUEST
-# define NN_HASH(name, update_func, digest_func, NAME) { \
- #name, \
- sizeof(struct name##_ctx), \
- NAME##_DIGEST_SIZE, \
- NAME##_DATA_SIZE, \
- (nettle_hash_init_func *) name##_init, \
- (nettle_hash_update_func *) update_func, \
- (nettle_hash_digest_func *) digest_func \
-}
+#define NN_HASH(name, update_func, digest_func, NAME) \
+ { \
+#name, sizeof(struct name##_ctx), NAME##_DIGEST_SIZE, \
+ NAME##_DATA_SIZE, \
+ (nettle_hash_init_func *)name##_init, \
+ (nettle_hash_update_func *)update_func, \
+ (nettle_hash_digest_func *)digest_func \
+ }
#endif
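
Editor's note: an NN_HASH expansion is nothing more than a braced nettle_hash initializer wired to an accelerated update/digest pair. Spelled out with plain nettle functions so the sketch stands alone (example_sha1 is illustrative):

	#include <nettle/nettle-meta.h>
	#include <nettle/sha.h>

	const struct nettle_hash example_sha1 = {
		"sha1",				/* name */
		sizeof(struct sha1_ctx),	/* context_size */
		SHA1_DIGEST_SIZE,		/* digest_size */
		SHA1_DATA_SIZE,			/* block_size */
		(nettle_hash_init_func *)sha1_init,
		(nettle_hash_update_func *)sha1_update,
		(nettle_hash_digest_func *)sha1_digest,
	};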