// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !race

#include "textflag.h"

// ARM atomic operations, for use by asm_$(GOOS)_arm.s.
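//
// The Go declarations these implement live in the package's Go files;
// the signatures below are inferred from the argument offsets and frame
// sizes in this file (an assumption, not copied from the Go source):
//
//	func armCompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
//	func armCompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
//	func armAddUint32(addr *uint32, delta uint32) (new uint32)
//	func armAddUint64(addr *uint64, delta uint64) (new uint64)
//	func armSwapUint32(addr *uint32, new uint32) (old uint32)
//	func armSwapUint64(addr *uint64, new uint64) (old uint64)
//	func armLoadUint64(addr *uint64) (val uint64)
//	func armStoreUint64(addr *uint64, val uint64)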

TEXT ·armCompareAndSwapUint32(SB),NOSPLIT,$0-13
	MOVW	addr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R3
casloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R0
	CMP	R0, R2
	BNE	casfail
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	casloop
	MOVW	$1, R0
	MOVBU	R0, ret+12(FP)
	RET
casfail:
	MOVW	$0, R0
	MOVBU	R0, ret+12(FP)
	RET
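
// The loop above is the classic load-linked/store-conditional
// compare-and-swap, and the same retry shape underlies every loop in
// this file. Rough Go-style sketch (illustrative only; ldrex and strex
// stand for the exclusive-access instructions, which have no Go
// equivalent):
//
//	for {
//		cur := ldrex(addr)         // LDREX: load and mark addr exclusive
//		if cur != old {
//			return false
//		}
//		if strex(addr, new) == 0 { // STREX: store iff still exclusive
//			return true
//		}
//		// lost exclusivity: another CPU touched addr; retry
//	}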

TEXT ·armCompareAndSwapUint64(SB),NOSPLIT,$0-21
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic accesses panic: LDREXD and STREXD require
	// an 8-aligned address, so if the low three bits of addr are
	// nonzero, storing to that small invalid address (1..7) faults
	// deliberately instead of behaving unpredictably. The same check
	// guards each 64-bit routine below.
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	oldlo+4(FP), R2
	MOVW	oldhi+8(FP), R3
	MOVW	newlo+12(FP), R4
	MOVW	newhi+16(FP), R5
cas64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R6	// loads R6 and R7
	CMP	R2, R6
	BNE	cas64fail
	CMP	R3, R7
	BNE	cas64fail
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	cas64loop
	MOVW	$1, R0
	MOVBU	R0, ret+20(FP)
	RET
cas64fail:
	MOVW	$0, R0
	MOVBU	R0, ret+20(FP)
	RET

TEXT ·armAddUint32(SB),NOSPLIT,$0-12
	MOVW	addr+0(FP), R1
	MOVW	delta+4(FP), R2
addloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	ADD	R2, R3
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	addloop
	MOVW	R3, ret+8(FP)
	RET

TEXT ·armAddUint64(SB),NOSPLIT,$0-20
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	deltalo+4(FP), R2
	MOVW	deltahi+8(FP), R3
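	// ADD.S/ADC below form a full 64-bit sum from 32-bit halves:
	// ADD.S adds the low words and sets the carry flag, then ADC
	// adds the high words plus that carry. For example,
	// 0x00000001FFFFFFFF + 1 = 0x0000000200000000 needs the carry
	// out of the low word.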
add64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R4	// loads R4 and R5
	ADD.S	R2, R4
	ADC	R3, R5
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	add64loop
	MOVW	R4, retlo+12(FP)
	MOVW	R5, rethi+16(FP)
	RET

TEXT ·armSwapUint32(SB),NOSPLIT,$0-12
	MOVW	addr+0(FP), R1
	MOVW	new+4(FP), R2
swaploop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	STREX	R2, (R1), R0
	CMP	$0, R0
	BNE	swaploop
	MOVW	R3, old+8(FP)
	RET

TEXT ·armSwapUint64(SB),NOSPLIT,$0-20
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	newlo+4(FP), R2
	MOVW	newhi+8(FP), R3
swap64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	swap64loop
	MOVW	R4, oldlo+12(FP)
	MOVW	R5, oldhi+16(FP)
	RET

TEXT ·armLoadUint64(SB),NOSPLIT,$0-12
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
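	// The load is made atomic by pairing LDREXD with an immediate
	// STREXD of the same value: if the store-exclusive succeeds,
	// nothing wrote the location in between, so the two words read
	// form a consistent snapshot; otherwise retry.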
load64loop:
	LDREXD	(R1), R2	// loads R2 and R3
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	load64loop
	MOVW	R2, vallo+4(FP)
	MOVW	R3, valhi+8(FP)
	RET

TEXT ·armStoreUint64(SB),NOSPLIT,$0-12
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	vallo+4(FP), R2
	MOVW	valhi+8(FP), R3
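	// STREXD succeeds only while this CPU holds the exclusive
	// monitor for the address, so an LDREXD (whose result is
	// discarded) is issued first to claim it.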
store64loop:
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	store64loop
	RET

// Check for broken 64-bit LDREXD as found in QEMU.
// LDREXD followed by immediate STREXD should succeed.
// If it fails, try a few times just to be sure (maybe our thread got
// rescheduled between the two instructions) and then panic.
// A bug in some copies of QEMU makes STREXD never succeed,
// which will make uses of the 64-bit atomic operations loop forever.
// If things are working, set ok64<> to avoid future checks.
// https://bugs.launchpad.net/qemu/+bug/670883.
TEXT check64<>(SB),NOSPLIT,$16-0
	MOVW	$10, R1
	// 8-aligned stack address scratch space
	// (R13+8 rounded down to a multiple of 8 stays inside the
	// 16-byte frame).
	MOVW	$8(R13), R5
	AND	$~7, R5
loop:
	LDREXD	(R5), R2
	STREXD	R2, (R5), R0
	CMP	$0, R0
	BEQ	ok
	SUB	$1, R1
	CMP	$0, R1
	BNE	loop
	// Must be buggy QEMU.
	BL	·panic64(SB)
ok:
	RET

// Fast, cached version of check. No frame, just MOVW CMP RET after first time.
TEXT fastCheck64<>(SB),NOSPLIT,$-4
	MOVW	ok64<>(SB), R0
	CMP	$0, R0	// have we been here before?
	RET.NE
	B	slowCheck64<>(SB)

TEXT slowCheck64<>(SB),NOSPLIT,$0-0
	BL	check64<>(SB)
	// Still here, must be okay.
	MOVW	$1, R0
	MOVW	R0, ok64<>(SB)
	RET

GLOBL ok64<>(SB), $4
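
// Taken together, fastCheck64, slowCheck64, and check64 implement a
// cached one-time test. Rough Go sketch (illustrative only; names
// mirror the assembly symbols above):
//
//	var ok64 uint32 // becomes 1 once LDREXD/STREXD is known to work
//
//	func fastCheck64() {
//		if ok64 == 0 {
//			slowCheck64()
//		}
//	}
//
//	func slowCheck64() {
//		check64() // calls panic64 on broken QEMU
//		ok64 = 1
//	}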