Diffstat (limited to 'crypto/md4/md4_locl.h')
-rw-r--r-- crypto/md4/md4_locl.h | 42
1 file changed, 0 insertions(+), 42 deletions(-)
diff --git a/crypto/md4/md4_locl.h b/crypto/md4/md4_locl.h
index 7e219b17ea..c8085b0ead 100644
--- a/crypto/md4/md4_locl.h
+++ b/crypto/md4/md4_locl.h
@@ -65,41 +65,13 @@
#ifndef MD4_LONG_LOG2
#define MD4_LONG_LOG2 2 /* default to 32 bits */
#endif
-void md4_block_host_order (MD4_CTX *c, const void *p,size_t num);
void md4_block_data_order (MD4_CTX *c, const void *p,size_t num);
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
-/*
- * *_block_host_order is expected to handle aligned data while
- * *_block_data_order - unaligned. As algorithm and host (x86)
- * are in this case of the same "endianness" these two are
- * otherwise indistinguishable. But normally you don't want to
- * call the same function because unaligned access in places
- * where alignment is expected is usually a "Bad Thing". Indeed,
- * on RISCs you get punished with BUS ERROR signal or *severe*
- * performance degradation. Intel CPUs are in turn perfectly
- * capable of loading unaligned data without such drastic side
- * effect. Yes, they say it's slower than aligned load, but no
- * exception is generated and therefore performance degradation
- * is *incomparable* with RISCs. What we should weight here is
- * costs of unaligned access against costs of aligning data.
- * According to my measurements allowing unaligned access results
- * in ~9% performance improvement on Pentium II operating at
- * 266MHz. I won't be surprised if the difference will be higher
- * on faster systems:-)
- *
- * <appro@fy.chalmers.se>
- */
-#define md4_block_data_order md4_block_host_order
-#endif
-
#define DATA_ORDER_IS_LITTLE_ENDIAN
#define HASH_LONG MD4_LONG
-#define HASH_LONG_LOG2 MD4_LONG_LOG2
#define HASH_CTX MD4_CTX
#define HASH_CBLOCK MD4_CBLOCK
-#define HASH_LBLOCK MD4_LBLOCK
#define HASH_UPDATE MD4_Update
#define HASH_TRANSFORM MD4_Transform
#define HASH_FINAL MD4_Final
@@ -110,21 +82,7 @@ void md4_block_data_order (MD4_CTX *c, const void *p,size_t num);
ll=(c)->C; HOST_l2c(ll,(s)); \
ll=(c)->D; HOST_l2c(ll,(s)); \
} while (0)
-#define HASH_BLOCK_HOST_ORDER md4_block_host_order
-#if !defined(L_ENDIAN) || defined(md4_block_data_order)
#define HASH_BLOCK_DATA_ORDER md4_block_data_order
-/*
- * Little-endians (Intel and Alpha) feel better without this.
- * It looks like memcpy does better job than generic
- * md4_block_data_order on copying-n-aligning input data.
- * But frankly speaking I didn't expect such result on Alpha.
- * On the other hand I've got this with egcs-1.0.2 and if
- * program is compiled with another (better?) compiler it
- * might turn out other way around.
- *
- * <appro@fy.chalmers.se>
- */
-#endif
#include "md32_common.h"
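
For context, a minimal self-contained C sketch (not OpenSSL code; the helper names load_le32_portable and load_host32_aligned are invented here for illustration) of the distinction the deleted comment draws between the aligned "host order" load and the byte-by-byte "data order" load. On a little-endian host the two produce the same word, which is why the removed x86 #define could alias md4_block_data_order to md4_block_host_order; only the byte-by-byte form is safe for unaligned input on strict-alignment (RISC) targets, so dropping the host-order entry point loses nothing in portability.

/* Illustrative only -- assumes a C99 compiler; names are not from OpenSSL. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable "data order" load: gather a 32-bit little-endian word byte by
 * byte, so the input pointer may be unaligned on any host. */
static uint32_t load_le32_portable(const unsigned char *p)
{
    return (uint32_t)p[0]
         | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16)
         | ((uint32_t)p[3] << 24);
}

/* "Host order" load: assumes an aligned pointer and a little-endian host,
 * so the word can be read directly. */
static uint32_t load_host32_aligned(const uint32_t *p)
{
    return *p;
}

int main(void)
{
    unsigned char buf[8] = { 0xef, 0xbe, 0xad, 0xde, 0x01, 0x02, 0x03, 0x04 };
    uint32_t aligned;

    memcpy(&aligned, buf, sizeof(aligned));

    /* On a little-endian host these two agree (0xdeadbeef), which is why the
     * removed x86 #define could treat the two entry points as identical. */
    printf("portable   : %08x\n", (unsigned)load_le32_portable(buf));
    printf("host order : %08x\n", (unsigned)load_host32_aligned(&aligned));

    /* The byte-by-byte gather also handles unaligned input, which a direct
     * word load cannot promise on strict-alignment targets. */
    printf("unaligned  : %08x\n", (unsigned)load_le32_portable(buf + 1));
    return 0;
}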