#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't account for the whole improvement
# factor. It turned out that eliminating RC4_CHAR from the config
# line results in a ~40% improvement (yes, even for the C
# implementation). Presumably it has everything to do with the AMD
# cache architecture and RAW [read-after-write] or similar penalties.
# Once again: the module *requires* a config line *without* RC4_CHAR!
# As for the coding "secret," I bet on partial register arithmetic.
# For example, instead of 'inc %r8; and $255,%r8' I simply write
# 'inc %r8b'. Even though the optimization manual discourages
# operating on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...
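#
# To illustrate the partial-register trick (a sketch, not part of the
# generated code): the modulo-256 index update
#
#	inc	%r8
#	and	$255,%r8
#
# collapses into the single instruction
#
#	inc	%r8b
#
# because a write to %r8b leaves the upper 56 bits of %r8 untouched,
# and those bits are already zero here anyway.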

# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz of
# clock. The latter means that to *estimate* what to expect from
# *your* CPU, multiply 54 by 3.3 and by the clock frequency in GHz.
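# A worked example, for a hypothetical 2.2GHz Opteron:
# 54MBps * 3.3 * 2.2 =~ 392MBps.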

# The Intel P4 EM64T core was found to run the AMD64 code really
# slowly... The only way to achieve comparable performance on P4 is
# to keep RC4_CHAR. Kind of ironic, huh? As it's apparently
# impossible to compose blended code which would perform within a 30%
# margin on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details... This applies to 0.9.8 and later.
# In the 0.9.7 context the RC4_CHAR codepath is never engaged and ~70
# bytes of code remain redundant.

$output=shift;

$win64a=1 if ($output =~ /win64a\.(?:s|asm)/);

open STDOUT,">$output" or die "can't open $output: $!";

if (defined($win64a)) {
    $dat="%rcx";	# arg1
    $len="%rdx";	# arg2
    $inp="%rsi";	# r8, arg3 moves here
    $out="%rdi";	# r9, arg4 moves here
} else {
    $dat="%rdi";	# arg1
    $len="%rsi";	# arg2
    $inp="%rdx";	# arg3
    $out="%rcx";	# arg4
}
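# (Background: the ELF/System V AMD64 ABI passes the first four integer
# arguments in %rdi,%rsi,%rdx,%rcx, while Win64 uses %rcx,%rdx,%r8,%r9;
# the Win64 prologue below therefore copies arg3/arg4 from %r8/%r9 into
# %rsi/%rdi so that both builds can share the same loop bodies.)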

$XX="%r10";	# RC4 index "x"
$TX="%r8";	# S[x]
$YY="%r11";	# RC4 index "y"
$TY="%r9";	# S[y]

sub PTR {
    my $ret=shift;
    if (defined($win64a)) {
	$ret =~ s/\[([\S]+)\+([\S]+)\]/[$2+$1]/g;   # [%rN+%rM*4]->[%rM*4+%rN]
	$ret =~ s/:([^\[]+)\[([^\]]+)\]/:[$2+$1]/g; # :off[ea]->:[ea+off]
    } else {
	$ret =~ s/[\+\*]/,/g;		# [%rN+%rM*4]->[%rN,%rM,4]
	$ret =~ s/\[([^\]]+)\]/($1)/g;	# [%rN]->(%rN)
    }
    $ret;
}
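# For example, with the register assignments above,
# &PTR("DWORD:[$dat+$XX*4]") yields "DWORD:[%r10*4+%rcx]" in win64a
# mode and "DWORD:(%rdi,%r10,4)" otherwise; the "DWORD:" size tag is
# later rewritten to "DWORD PTR" or stripped by the post-processing
# at the bottom of this file.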

$code=<<___ if (!defined($win64a));
.text

.globl	RC4
.type	RC4,\@function
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	repret
.Lentry:
___
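# "repret" is a placeholder; the post-processing at the bottom turns it
# into the two-byte 0xF3,0xC3 ("rep ret") encoding.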
$code=<<___ if (defined($win64a));
_TEXT	SEGMENT
PUBLIC	RC4
ALIGN	16
RC4	PROC
	or	$len,$len
	jne	.Lentry
	repret
.Lentry:
	push	%rdi
	push	%rsi
	sub	\$40,%rsp
	mov	%r8,$inp
	mov	%r9,$out
___
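# The code below is the RC4 PRGA proper, unrolled to emit eight
# key-stream bytes per .Lloop8 iteration. One step, sketched in
# Perl-ish pseudocode (illustration only):
#
#	$x = ($x+1) & 0xff;   $tx = $S[$x];
#	$y = ($y+$tx) & 0xff; $ty = $S[$y];
#	@S[$x,$y] = ($ty,$tx);			# swap S[x] and S[y]
#	$keybyte = $S[($tx+$ty) & 0xff];
#
# Eight such bytes are accumulated in %rax with ror and then XOR-ed
# against a whole input qword at once.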
$code.=<<___;
	add	\$8,$dat
	movl	`&PTR("DWORD:-8[$dat]")`,$XX#d
	movl	`&PTR("DWORD:-4[$dat]")`,$YY#d
	cmpl	\$-1,`&PTR("DWORD:256[$dat]")`
	je	.LRC4_CHAR
	test	\$-8,$len
	jz	.Lloop1
.align	16
.Lloop8:
	inc	$XX#b
	movl	`&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
	add	$TX#b,$YY#b
	movl	`&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
	movl	$TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
	movl	$TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
	add	$TX#b,$TY#b
	inc	$XX#b
	movl	`&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
	movb	`&PTR("BYTE:[$dat+$TY*4]")`,%al
___
for ($i=1;$i<=6;$i++) {		# key-stream bytes 2 through 7 of the unroll
$code.=<<___;
	add	$TX#b,$YY#b
	ror	\$8,%rax
	movl	`&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
	movl	$TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
	movl	$TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
	add	$TX#b,$TY#b
	inc	$XX#b
	movl	`&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
	movb	`&PTR("BYTE:[$dat+$TY*4]")`,%al
___
}
$code.=<<___;
	add	$TX#b,$YY#b
	ror	\$8,%rax
	movl	`&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
	movl	$TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
	movl	$TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
	sub	\$8,$len
	add	$TY#b,$TX#b
	movb	`&PTR("BYTE:[$dat+$TX*4]")`,%al
	ror	\$8,%rax
	add	\$8,$inp
	add	\$8,$out

	xor	`&PTR("QWORD:-8[$inp]")`,%rax
	mov	%rax,`&PTR("QWORD:-8[$out]")`

	test	\$-8,$len
	jnz	.Lloop8
	cmp	\$0,$len
	jne	.Lloop1
.Lexit:
	movl	$XX#d,`&PTR("DWORD:-8[$dat]")`
	movl	$YY#d,`&PTR("DWORD:-4[$dat]")`
___
$code.=<<___ if (defined($win64a));
	add	\$40,%rsp
	pop	%rsi
	pop	%rdi
___
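# .Lloop1 below is the one-byte-at-a-time tail for the DWORD-array key
# layout, while .LRC4_CHAR handles byte-array keys, the layout the
# entry code detects by comparing the dword at offset 256 of the key
# data against -1 (see rc4_skey.c).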
$code.=<<___;
	repret
.align	16
.Lloop1:
	movzb	`&PTR("BYTE:[$inp]")`,%eax
	inc	$XX#b
	movl	`&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
	add	$TX#b,$YY#b
	movl	`&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
	movl	$TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
	movl	$TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
	add	$TY#b,$TX#b
	movl	`&PTR("DWORD:[$dat+$TX*4]")`,$TY#d
	xor	$TY,%rax
	inc	$inp
	movb	%al,`&PTR("BYTE:[$out]")`
	inc	$out
	dec	$len
	jnz	.Lloop1
	jmp	.Lexit

.align	16
.LRC4_CHAR:
	inc	$XX#b
	movzb	`&PTR("BYTE:[$dat+$XX]")`,$TX#d
	add	$TX#b,$YY#b
	movzb	`&PTR("BYTE:[$dat+$YY]")`,$TY#d
	movb	$TX#b,`&PTR("BYTE:[$dat+$YY]")`
	movb	$TY#b,`&PTR("BYTE:[$dat+$XX]")`
	add	$TX#b,$TY#b
	movzb	`&PTR("BYTE:[$dat+$TY]")`,$TY#d
	xorb	`&PTR("BYTE:[$inp]")`,$TY#b
	movb	$TY#b,`&PTR("BYTE:[$out]")`
	inc	$inp
	inc	$out
	dec	$len
	jnz	.LRC4_CHAR
	jmp	.Lexit
___
$code.=<<___ if (defined($win64a));
RC4	ENDP
_TEXT	ENDS
END
___
$code.=<<___ if (!defined($win64a));
.size	RC4,.-RC4
___

$code =~ s/#([bwd])/$1/gm;		# "%r8#d" -> "%r8d", etc.
$code =~ s/\`([^\`]*)\`/eval $1/gem;	# expand the `&PTR(...)` constructs

if (defined($win64a)) {
    $code =~ s/\.align/ALIGN/gm;
    $code =~ s/[\$%]//gm;		# strip AT&T decorations
    $code =~ s/\.L/\$L/gm;		# .Llabel -> $Llabel
    $code =~ s/([\w]+)([\s]+)([\S]+),([\S]+)/$1$2$4,$3/gm;	# "op src,dst" -> "op dst,src"
    $code =~ s/([QD]*WORD|BYTE):/$1 PTR/gm;
    $code =~ s/mov[bwlq]/mov/gm;	# drop operand-size suffixes
    $code =~ s/movzb/movzx/gm;
    $code =~ s/repret/DB\t0F3h,0C3h/gm;	# two-byte "rep ret"
    $code =~ s/cmpl/cmp/gm;
    $code =~ s/xorb/xor/gm;
} else {
    $code =~ s/([QD]*WORD|BYTE)://gm;	# size is implied by the mnemonic suffix
    $code =~ s/repret/.byte\t0xF3,0xC3/gm;	# two-byte "rep ret"
}
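# End-to-end illustration (not emitted anywhere): the template line
#	movl	`&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
# becomes
#	mov	r8d,DWORD PTR[r10*4+rcx]
# in win64a mode and
#	movl	(%rdi,%r10,4),%r8d
# in the default AT&T mode.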
print $code;