author     Russ Cox <rsc@golang.org>  2014-11-15 08:00:38 -0500
committer  Russ Cox <rsc@golang.org>  2014-11-15 08:00:38 -0500
commit     8b46a22b652d6e7a4a69b9523d57aff12f9dc945 (patch)
tree       8f1c3f2d7e06daa3a9ad5721658a31b63519d748
parent     66fbfa707147ff58e13beed2d4a957a06bede869 (diff)
parent     0b7f128e76956367ddb4f9ddbee4477d3b462e96 (diff)
download   go-8b46a22b652d6e7a4a69b9523d57aff12f9dc945.tar.gz
[dev.garbage] all: merge dev.cc into dev.garbage
The garbage collector is now written in Go.
There is plenty to clean up (just like on dev.cc).

all.bash passes on darwin/amd64, darwin/386, linux/amd64, linux/386.

TBR=rlh
R=austin, rlh, bradfitz
CC=golang-codereviews
https://codereview.appspot.com/173250043
-rw-r--r--  dev.cc | 0
-rw-r--r--  include/link.h | 2
-rw-r--r--  src/cmd/5c/Makefile | 5
-rw-r--r--  src/cmd/5c/cgen.c | 1213
-rw-r--r--  src/cmd/5c/doc.go | 16
-rw-r--r--  src/cmd/5c/gc.h | 333
-rw-r--r--  src/cmd/5c/list.c | 39
-rw-r--r--  src/cmd/5c/mul.c | 640
-rw-r--r--  src/cmd/5c/peep.c | 1478
-rw-r--r--  src/cmd/5c/reg.c | 1210
-rw-r--r--  src/cmd/5c/sgen.c | 265
-rw-r--r--  src/cmd/5c/swt.c | 461
-rw-r--r--  src/cmd/5c/txt.c | 1361
-rw-r--r--  src/cmd/6c/Makefile | 5
-rw-r--r--  src/cmd/6c/cgen.c | 2046
-rw-r--r--  src/cmd/6c/div.c | 236
-rw-r--r--  src/cmd/6c/doc.go | 16
-rw-r--r--  src/cmd/6c/gc.h | 359
-rw-r--r--  src/cmd/6c/list.c | 38
-rw-r--r--  src/cmd/6c/machcap.c | 107
-rw-r--r--  src/cmd/6c/mul.c | 458
-rw-r--r--  src/cmd/6c/peep.c | 902
-rw-r--r--  src/cmd/6c/reg.c | 1523
-rw-r--r--  src/cmd/6c/sgen.c | 483
-rw-r--r--  src/cmd/6c/swt.c | 353
-rw-r--r--  src/cmd/6c/txt.c | 1674
-rw-r--r--  src/cmd/8c/Makefile | 5
-rw-r--r--  src/cmd/8c/cgen.c | 1939
-rw-r--r--  src/cmd/8c/cgen64.c | 2657
-rw-r--r--  src/cmd/8c/div.c | 236
-rw-r--r--  src/cmd/8c/doc.go | 16
-rw-r--r--  src/cmd/8c/gc.h | 364
-rw-r--r--  src/cmd/8c/list.c | 38
-rw-r--r--  src/cmd/8c/machcap.c | 116
-rw-r--r--  src/cmd/8c/mul.c | 458
-rw-r--r--  src/cmd/8c/peep.c | 807
-rw-r--r--  src/cmd/8c/reg.c | 1438
-rw-r--r--  src/cmd/8c/sgen.c | 483
-rw-r--r--  src/cmd/8c/swt.c | 341
-rw-r--r--  src/cmd/8c/txt.c | 1537
-rw-r--r--  src/cmd/cc/Makefile | 10
-rw-r--r--  src/cmd/cc/acid.c | 344
-rw-r--r--  src/cmd/cc/bits.c | 120
-rw-r--r--  src/cmd/cc/cc.h | 835
-rw-r--r--  src/cmd/cc/cc.y | 1220
-rw-r--r--  src/cmd/cc/com.c | 1384
-rw-r--r--  src/cmd/cc/com64.c | 644
-rw-r--r--  src/cmd/cc/dcl.c | 1707
-rw-r--r--  src/cmd/cc/doc.go | 13
-rw-r--r--  src/cmd/cc/dpchk.c | 793
-rw-r--r--  src/cmd/cc/funct.c | 431
-rw-r--r--  src/cmd/cc/godefs.c | 367
-rw-r--r--  src/cmd/cc/lex.c | 1593
-rw-r--r--  src/cmd/cc/mac.c | 34
-rw-r--r--  src/cmd/cc/omachcap.c | 40
-rw-r--r--  src/cmd/cc/pgen.c | 622
-rw-r--r--  src/cmd/cc/pswt.c | 140
-rw-r--r--  src/cmd/cc/scon.c | 640
-rw-r--r--  src/cmd/cc/sub.c | 2068
-rw-r--r--  src/cmd/cc/y.tab.c | 3822
-rw-r--r--  src/cmd/cc/y.tab.h | 230
-rw-r--r--  src/cmd/cgo/main.go | 1
-rw-r--r--  src/cmd/cgo/out.go | 154
-rw-r--r--  src/cmd/dist/build.c | 146
-rw-r--r--  src/cmd/dist/buildruntime.c | 306
-rw-r--r--  src/cmd/gc/export.c | 41
-rw-r--r--  src/cmd/gc/go.h | 7
-rw-r--r--  src/cmd/gc/lex.c | 200
-rw-r--r--  src/cmd/gc/obj.c | 12
-rw-r--r--  src/cmd/gc/reflect.c | 6
-rw-r--r--  src/cmd/gc/subr.c | 46
-rw-r--r--  src/cmd/go/build.go | 54
-rw-r--r--  src/liblink/asm6.c | 5
-rw-r--r--  src/liblink/objfile.c | 33
-rw-r--r--  src/reflect/type.go | 5
-rwxr-xr-x  src/run.bash | 4
-rw-r--r--  src/runtime/alg.go | 3
-rw-r--r--  src/runtime/arch1_386.go | 15
-rw-r--r--  src/runtime/arch1_amd64.go | 15
-rw-r--r--  src/runtime/arch1_arm.go | 15
-rw-r--r--  src/runtime/arch_386.h | 17
-rw-r--r--  src/runtime/arch_amd64.h | 25
-rw-r--r--  src/runtime/arch_arm.h | 17
-rw-r--r--  src/runtime/asm.s | 5
-rw-r--r--  src/runtime/asm_386.s | 65
-rw-r--r--  src/runtime/asm_amd64.s | 63
-rw-r--r--  src/runtime/asm_amd64p32.s | 52
-rw-r--r--  src/runtime/asm_arm.s | 60
-rw-r--r--  src/runtime/atomic.go | 81
-rw-r--r--  src/runtime/atomic_386.c | 46
-rw-r--r--  src/runtime/atomic_386.go | 91
-rw-r--r--  src/runtime/atomic_amd64x.c | 29
-rw-r--r--  src/runtime/atomic_amd64x.go | 82
-rw-r--r--  src/runtime/atomic_arm.go | 10
-rw-r--r--  src/runtime/cgo.go | 23
-rw-r--r--  src/runtime/cgo/callbacks.c | 83
-rw-r--r--  src/runtime/cgo/callbacks.go | 95
-rw-r--r--  src/runtime/cgo/dragonfly.go (renamed from src/runtime/cgo/dragonfly.c) | 14
-rw-r--r--  src/runtime/cgo/freebsd.go (renamed from src/runtime/cgo/freebsd.c) | 17
-rw-r--r--  src/runtime/cgo/iscgo.go (renamed from src/runtime/cgo/iscgo.c) | 11
-rw-r--r--  src/runtime/cgo/netbsd.go (renamed from src/runtime/cgo/netbsd.c) | 14
-rw-r--r--  src/runtime/cgo/openbsd.go (renamed from src/runtime/cgo/openbsd.c) | 24
-rw-r--r--  src/runtime/cgo/setenv.c | 13
-rw-r--r--  src/runtime/cgo/setenv.go | 21
-rw-r--r--  src/runtime/cgocall.go | 20
-rw-r--r--  src/runtime/chan.go | 2
-rw-r--r--  src/runtime/chan.h | 68
-rw-r--r--  src/runtime/chan1.go | 61
-rw-r--r--  src/runtime/complex.go | 39
-rw-r--r--  src/runtime/cpuprof.go | 8
-rw-r--r--  src/runtime/cputicks.go | 11
-rw-r--r--  src/runtime/debug.go | 18
-rw-r--r--  src/runtime/defs.c | 15
-rw-r--r--  src/runtime/defs1_netbsd_386.go | 171
-rw-r--r--  src/runtime/defs1_netbsd_amd64.go | 183
-rw-r--r--  src/runtime/defs1_netbsd_arm.go | 170
-rw-r--r--  src/runtime/defs1_solaris_amd64.go | 245
-rw-r--r--  src/runtime/defs_android_arm.h | 3
-rw-r--r--  src/runtime/defs_darwin_386.go | 382
-rw-r--r--  src/runtime/defs_darwin_386.h | 392
-rw-r--r--  src/runtime/defs_darwin_amd64.go | 385
-rw-r--r--  src/runtime/defs_darwin_amd64.h | 395
-rw-r--r--  src/runtime/defs_dragonfly_386.go | 190
-rw-r--r--  src/runtime/defs_dragonfly_386.h | 198
-rw-r--r--  src/runtime/defs_dragonfly_amd64.go | 208
-rw-r--r--  src/runtime/defs_dragonfly_amd64.h | 208
-rw-r--r--  src/runtime/defs_freebsd_386.go | 213
-rw-r--r--  src/runtime/defs_freebsd_386.h | 213
-rw-r--r--  src/runtime/defs_freebsd_amd64.go | 224
-rw-r--r--  src/runtime/defs_freebsd_amd64.h | 224
-rw-r--r--  src/runtime/defs_freebsd_arm.go | 186
-rw-r--r--  src/runtime/defs_freebsd_arm.h | 186
-rw-r--r--  src/runtime/defs_linux_386.go | 217
-rw-r--r--  src/runtime/defs_linux_386.h | 211
-rw-r--r--  src/runtime/defs_linux_amd64.go | 253
-rw-r--r--  src/runtime/defs_linux_amd64.h | 254
-rw-r--r--  src/runtime/defs_linux_arm.go | 167
-rw-r--r--  src/runtime/defs_linux_arm.h | 168
-rw-r--r--  src/runtime/defs_nacl_386.go | 42
-rw-r--r--  src/runtime/defs_nacl_386.h | 63
-rw-r--r--  src/runtime/defs_nacl_amd64p32.go | 63
-rw-r--r--  src/runtime/defs_nacl_amd64p32.h | 90
-rw-r--r--  src/runtime/defs_nacl_arm.go | 49
-rw-r--r--  src/runtime/defs_nacl_arm.h | 70
-rw-r--r--  src/runtime/defs_netbsd_386.h | 182
-rw-r--r--  src/runtime/defs_netbsd_amd64.h | 194
-rw-r--r--  src/runtime/defs_netbsd_arm.h | 184
-rw-r--r--  src/runtime/defs_openbsd_386.go | 170
-rw-r--r--  src/runtime/defs_openbsd_386.h | 168
-rw-r--r--  src/runtime/defs_openbsd_amd64.go | 181
-rw-r--r--  src/runtime/defs_openbsd_amd64.h | 179
-rw-r--r--  src/runtime/defs_plan9_386.go | 23
-rw-r--r--  src/runtime/defs_plan9_386.h | 26
-rw-r--r--  src/runtime/defs_plan9_amd64.go | 32
-rw-r--r--  src/runtime/defs_plan9_amd64.h | 34
-rw-r--r--  src/runtime/defs_solaris_amd64.h | 254
-rw-r--r--  src/runtime/defs_windows_386.go | 109
-rw-r--r--  src/runtime/defs_windows_386.h | 116
-rw-r--r--  src/runtime/defs_windows_amd64.go | 124
-rw-r--r--  src/runtime/defs_windows_amd64.h | 131
-rw-r--r--  src/runtime/env_posix.go | 10
-rw-r--r--  src/runtime/export_test.go | 80
-rw-r--r--  src/runtime/extern.go | 3
-rw-r--r--  src/runtime/float.c | 10
-rw-r--r--  src/runtime/funcdata.h | 10
-rw-r--r--  src/runtime/futex_test.go | 4
-rw-r--r--  src/runtime/gcinfo_test.go | 5
-rw-r--r--  src/runtime/go_tls.h | 22
-rw-r--r--  src/runtime/heapdump.c | 851
-rw-r--r--  src/runtime/heapdump.go | 729
-rw-r--r--  src/runtime/lfstack.c | 85
-rw-r--r--  src/runtime/lfstack.go | 36
-rw-r--r--  src/runtime/lfstack_32bit.go | 21
-rw-r--r--  src/runtime/lfstack_amd64.go | 24
-rw-r--r--  src/runtime/lfstack_linux_power64x.go | 26
-rw-r--r--  src/runtime/lock_futex.go | 7
-rw-r--r--  src/runtime/lock_sema.go | 8
-rw-r--r--  src/runtime/malloc.c | 396
-rw-r--r--  src/runtime/malloc.go | 134
-rw-r--r--  src/runtime/malloc.h | 620
-rw-r--r--  src/runtime/malloc1.go | 318
-rw-r--r--  src/runtime/malloc2.go | 473
-rw-r--r--  src/runtime/mcache.c | 115
-rw-r--r--  src/runtime/mcache.go | 91
-rw-r--r--  src/runtime/mcentral.c | 214
-rw-r--r--  src/runtime/mcentral.go | 199
-rw-r--r--  src/runtime/mem.go | 24
-rw-r--r--  src/runtime/mem_bsd.go | 88
-rw-r--r--  src/runtime/mem_darwin.c | 82
-rw-r--r--  src/runtime/mem_darwin.go | 58
-rw-r--r--  src/runtime/mem_dragonfly.c | 105
-rw-r--r--  src/runtime/mem_freebsd.c | 100
-rw-r--r--  src/runtime/mem_linux.c | 166
-rw-r--r--  src/runtime/mem_linux.go | 135
-rw-r--r--  src/runtime/mem_netbsd.c | 100
-rw-r--r--  src/runtime/mem_openbsd.c | 100
-rw-r--r--  src/runtime/mem_solaris.c | 101
-rw-r--r--  src/runtime/mem_windows.c | 132
-rw-r--r--  src/runtime/mem_windows.go | 119
-rw-r--r--  src/runtime/mfixalloc.c | 64
-rw-r--r--  src/runtime/mfixalloc.go | 59
-rw-r--r--  src/runtime/mgc.go | 2422
-rw-r--r--  src/runtime/mgc0.c | 2682
-rw-r--r--  src/runtime/mgc0.go | 18
-rw-r--r--  src/runtime/mgc0.h | 74
-rw-r--r--  src/runtime/mgc1.go | 80
-rw-r--r--  src/runtime/mheap.c | 889
-rw-r--r--  src/runtime/mheap.go | 785
-rw-r--r--  src/runtime/mprof.go | 33
-rw-r--r--  src/runtime/msize.c | 184
-rw-r--r--  src/runtime/msize.go | 174
-rw-r--r--  src/runtime/netpoll.go | 53
-rw-r--r--  src/runtime/netpoll_solaris.go (renamed from src/runtime/netpoll_solaris.c) | 259
-rw-r--r--  src/runtime/netpoll_windows.c | 163
-rw-r--r--  src/runtime/netpoll_windows.go | 156
-rw-r--r--  src/runtime/norace_test.go | 4
-rw-r--r--  src/runtime/os1_darwin.go | 423
-rw-r--r--  src/runtime/os1_dragonfly.go | 220
-rw-r--r--  src/runtime/os1_freebsd.go | 221
-rw-r--r--  src/runtime/os1_linux.go | 287
-rw-r--r--  src/runtime/os1_openbsd.go | 235
-rw-r--r--  src/runtime/os2_darwin.go | 14
-rw-r--r--  src/runtime/os2_dragonfly.go | 12
-rw-r--r--  src/runtime/os2_freebsd.go | 12
-rw-r--r--  src/runtime/os2_linux.go | 23
-rw-r--r--  src/runtime/os2_openbsd.go | 14
-rw-r--r--  src/runtime/os2_solaris.go | 13
-rw-r--r--  src/runtime/os3_solaris.go | 493
-rw-r--r--  src/runtime/os_darwin.c | 570
-rw-r--r--  src/runtime/os_darwin.go | 30
-rw-r--r--  src/runtime/os_darwin.h | 43
-rw-r--r--  src/runtime/os_dragonfly.c | 315
-rw-r--r--  src/runtime/os_dragonfly.go | 34
-rw-r--r--  src/runtime/os_dragonfly.h | 30
-rw-r--r--  src/runtime/os_freebsd.c | 323
-rw-r--r--  src/runtime/os_freebsd.go | 29
-rw-r--r--  src/runtime/os_freebsd.h | 29
-rw-r--r--  src/runtime/os_freebsd_arm.go (renamed from src/runtime/os_freebsd_arm.c) | 17
-rw-r--r--  src/runtime/os_linux.c | 362
-rw-r--r--  src/runtime/os_linux.go | 26
-rw-r--r--  src/runtime/os_linux.h | 41
-rw-r--r--  src/runtime/os_linux_386.c | 38
-rw-r--r--  src/runtime/os_linux_386.go | 36
-rw-r--r--  src/runtime/os_linux_arm.c | 80
-rw-r--r--  src/runtime/os_linux_arm.go | 75
-rw-r--r--  src/runtime/os_openbsd.c | 312
-rw-r--r--  src/runtime/os_openbsd.go | 30
-rw-r--r--  src/runtime/os_openbsd.h | 26
-rw-r--r--  src/runtime/os_solaris.c | 560
-rw-r--r--  src/runtime/os_solaris.go | 52
-rw-r--r--  src/runtime/os_solaris.h | 55
-rw-r--r--  src/runtime/os_windows.go | 4
-rw-r--r--  src/runtime/panic.c | 200
-rw-r--r--  src/runtime/panic.go | 50
-rw-r--r--  src/runtime/panic1.go | 161
-rw-r--r--  src/runtime/parfor.c | 226
-rw-r--r--  src/runtime/parfor.go | 186
-rw-r--r--  src/runtime/pprof/pprof_test.go | 2
-rw-r--r--  src/runtime/proc.c | 3497
-rw-r--r--  src/runtime/proc.go | 42
-rw-r--r--  src/runtime/proc1.go | 3186
-rw-r--r--  src/runtime/race.c | 314
-rw-r--r--  src/runtime/race.go | 42
-rw-r--r--  src/runtime/race.h | 34
-rw-r--r--  src/runtime/race0.go | 2
-rw-r--r--  src/runtime/race1.go | 304
-rw-r--r--  src/runtime/race_amd64.s | 3
-rw-r--r--  src/runtime/rdebug.go | 18
-rw-r--r--  src/runtime/rt0_linux_386.s | 1
-rw-r--r--  src/runtime/rt0_windows_amd64.s | 3
-rw-r--r--  src/runtime/runtime.c | 411
-rw-r--r--  src/runtime/runtime.h | 1151
-rw-r--r--  src/runtime/runtime1.go | 417
-rw-r--r--  src/runtime/runtime2.go | 613
-rw-r--r--  src/runtime/runtime2_windows.go | 8
-rw-r--r--  src/runtime/select.go | 6
-rw-r--r--  src/runtime/signal.c | 25
-rw-r--r--  src/runtime/signal1_unix.go | 111
-rw-r--r--  src/runtime/signal_386.c | 122
-rw-r--r--  src/runtime/signal_386.go | 131
-rw-r--r--  src/runtime/signal_amd64x.c | 156
-rw-r--r--  src/runtime/signal_amd64x.go | 163
-rw-r--r--  src/runtime/signal_arm.c | 121
-rw-r--r--  src/runtime/signal_arm.go | 126
-rw-r--r--  src/runtime/signal_darwin.go | 45
-rw-r--r--  src/runtime/signal_darwin_386.go | 34
-rw-r--r--  src/runtime/signal_darwin_386.h | 23
-rw-r--r--  src/runtime/signal_darwin_amd64.go | 42
-rw-r--r--  src/runtime/signal_darwin_amd64.h | 31
-rw-r--r--  src/runtime/signal_dragonfly.go | 46
-rw-r--r--  src/runtime/signal_dragonfly_amd64.go | 44
-rw-r--r--  src/runtime/signal_dragonfly_amd64.h | 31
-rw-r--r--  src/runtime/signal_freebsd.go | 46
-rw-r--r--  src/runtime/signal_freebsd_386.go | 34
-rw-r--r--  src/runtime/signal_freebsd_386.h | 23
-rw-r--r--  src/runtime/signal_freebsd_amd64.go | 44
-rw-r--r--  src/runtime/signal_freebsd_amd64.h | 31
-rw-r--r--  src/runtime/signal_freebsd_arm.go | 48
-rw-r--r--  src/runtime/signal_freebsd_arm.h | 28
-rw-r--r--  src/runtime/signal_linux.go | 78
-rw-r--r--  src/runtime/signal_linux_386.go | 36
-rw-r--r--  src/runtime/signal_linux_386.h | 24
-rw-r--r--  src/runtime/signal_linux_amd64.go | 46
-rw-r--r--  src/runtime/signal_linux_amd64.h | 32
-rw-r--r--  src/runtime/signal_linux_arm.go | 48
-rw-r--r--  src/runtime/signal_linux_arm.h | 28
-rw-r--r--  src/runtime/signal_openbsd.go | 46
-rw-r--r--  src/runtime/signal_openbsd_386.go | 41
-rw-r--r--  src/runtime/signal_openbsd_386.h | 23
-rw-r--r--  src/runtime/signal_openbsd_amd64.go | 49
-rw-r--r--  src/runtime/signal_openbsd_amd64.h | 31
-rw-r--r--  src/runtime/signal_solaris.go | 88
-rw-r--r--  src/runtime/signal_solaris_amd64.go | 46
-rw-r--r--  src/runtime/signal_solaris_amd64.h | 31
-rw-r--r--  src/runtime/signal_unix.c | 119
-rw-r--r--  src/runtime/signal_unix.go | 4
-rw-r--r--  src/runtime/signals_darwin.h | 53
-rw-r--r--  src/runtime/signals_dragonfly.h | 54
-rw-r--r--  src/runtime/signals_freebsd.h | 54
-rw-r--r--  src/runtime/signals_linux.h | 86
-rw-r--r--  src/runtime/signals_openbsd.h | 54
-rw-r--r--  src/runtime/signals_solaris.h | 97
-rw-r--r--  src/runtime/sigpanic_unix.go | 9
-rw-r--r--  src/runtime/sigqueue.go | 21
-rw-r--r--  src/runtime/slice.go | 8
-rw-r--r--  src/runtime/softfloat64.go | 22
-rw-r--r--  src/runtime/softfloat64_test.go | 2
-rw-r--r--  src/runtime/softfloat_arm.c | 687
-rw-r--r--  src/runtime/softfloat_arm.go | 644
-rw-r--r--  src/runtime/sqrt.go | 9
-rw-r--r--  src/runtime/stack.c | 874
-rw-r--r--  src/runtime/stack.go | 13
-rw-r--r--  src/runtime/stack.h | 97
-rw-r--r--  src/runtime/stack1.go | 818
-rw-r--r--  src/runtime/stack2.go | 106
-rw-r--r--  src/runtime/string.c | 226
-rw-r--r--  src/runtime/string.go | 14
-rw-r--r--  src/runtime/string1.go | 108
-rw-r--r--  src/runtime/stubs.go | 164
-rw-r--r--  src/runtime/stubs2.go | 27
-rw-r--r--  src/runtime/symtab.go | 22
-rw-r--r--  src/runtime/sys_arm.c | 35
-rw-r--r--  src/runtime/sys_arm.go | 35
-rw-r--r--  src/runtime/sys_darwin_386.s | 3
-rw-r--r--  src/runtime/sys_darwin_amd64.s | 3
-rw-r--r--  src/runtime/sys_dragonfly_386.s | 3
-rw-r--r--  src/runtime/sys_dragonfly_amd64.s | 3
-rw-r--r--  src/runtime/sys_freebsd_386.s | 3
-rw-r--r--  src/runtime/sys_freebsd_amd64.s | 3
-rw-r--r--  src/runtime/sys_freebsd_arm.s | 5
-rw-r--r--  src/runtime/sys_linux_386.s | 3
-rw-r--r--  src/runtime/sys_linux_amd64.s | 3
-rw-r--r--  src/runtime/sys_linux_arm.s | 5
-rw-r--r--  src/runtime/sys_nacl_386.s | 3
-rw-r--r--  src/runtime/sys_nacl_amd64p32.s | 3
-rw-r--r--  src/runtime/sys_nacl_arm.s | 5
-rw-r--r--  src/runtime/sys_netbsd_386.s | 3
-rw-r--r--  src/runtime/sys_netbsd_amd64.s | 3
-rw-r--r--  src/runtime/sys_netbsd_arm.s | 5
-rw-r--r--  src/runtime/sys_openbsd_386.s | 3
-rw-r--r--  src/runtime/sys_openbsd_amd64.s | 3
-rw-r--r--  src/runtime/sys_plan9_386.s | 3
-rw-r--r--  src/runtime/sys_plan9_amd64.s | 3
-rw-r--r--  src/runtime/sys_solaris_amd64.s | 15
-rw-r--r--  src/runtime/sys_windows_386.s | 3
-rw-r--r--  src/runtime/sys_windows_amd64.s | 3
-rw-r--r--  src/runtime/sys_x86.go (renamed from src/runtime/sys_x86.c) | 58
-rw-r--r--  src/runtime/syscall2_solaris.go | 47
-rw-r--r--  src/runtime/syscall_solaris.c | 23
-rw-r--r--  src/runtime/syscall_solaris.go | 79
-rw-r--r--  src/runtime/thunk.s | 16
-rw-r--r--  src/runtime/thunk_solaris_amd64.s | 88
-rw-r--r--  src/runtime/thunk_windows.s | 3
-rw-r--r--  src/runtime/tls_arm.s | 3
-rw-r--r--  src/runtime/traceback.go | 28
-rw-r--r--  src/runtime/type.go | 99
-rw-r--r--  src/runtime/type.h | 113
-rw-r--r--  src/runtime/typekind.h | 12
-rw-r--r--  src/runtime/typekind1.go | 39
-rw-r--r--  src/runtime/vdso_linux_amd64.c | 371
-rw-r--r--  src/runtime/vdso_linux_amd64.go | 328
-rw-r--r--  src/runtime/vdso_none.go | 11
-rw-r--r--  src/runtime/vlop_arm.s | 5
-rw-r--r--  src/runtime/vlrt.c | 914
384 files changed, 22418 insertions, 69496 deletions
diff --git a/dev.cc b/dev.cc
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/dev.cc
diff --git a/include/link.h b/include/link.h
index 80f3f4d82..06f3ebb48 100644
--- a/include/link.h
+++ b/include/link.h
@@ -89,7 +89,7 @@ struct Prog
int32 lineno;
Prog* link;
short as;
- uchar scond; // arm only
+ uchar scond; // arm only; condition codes
// operands
Addr from;
diff --git a/src/cmd/5c/Makefile b/src/cmd/5c/Makefile
deleted file mode 100644
index 3f528d751..000000000
--- a/src/cmd/5c/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright 2012 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.dist
diff --git a/src/cmd/5c/cgen.c b/src/cmd/5c/cgen.c
deleted file mode 100644
index 5a049ae62..000000000
--- a/src/cmd/5c/cgen.c
+++ /dev/null
@@ -1,1213 +0,0 @@
-// Inferno utils/5c/cgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/cgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-#include "../../runtime/funcdata.h"
-
-void
-_cgen(Node *n, Node *nn, int inrel)
-{
- Node *l, *r;
- Prog *p1;
- Node nod, nod1, nod2, nod3, nod4;
- int o, t;
- int32 v, curs;
-
- if(debug['g']) {
- prtree(nn, "cgen lhs");
- prtree(n, "cgen");
- }
- if(n == Z || n->type == T)
- return;
- if(typesuv[n->type->etype] && (n->op != OFUNC || nn != Z)) {
- sugen(n, nn, n->type->width);
- return;
- }
- l = n->left;
- r = n->right;
- o = n->op;
- if(n->addable >= INDEXED) {
- if(nn == Z) {
- switch(o) {
- default:
- nullwarn(Z, Z);
- break;
- case OINDEX:
- nullwarn(l, r);
- break;
- }
- return;
- }
- gmove(n, nn);
- return;
- }
- curs = cursafe;
-
- if(n->complex >= FNX)
- if(l->complex >= FNX)
- if(r != Z && r->complex >= FNX)
- switch(o) {
- default:
- regret(&nod, r, 0, 0);
- cgen(r, &nod);
-
- regsalloc(&nod1, r);
- gopcode(OAS, &nod, Z, &nod1);
-
- regfree(&nod);
- nod = *n;
- nod.right = &nod1;
- cgen(&nod, nn);
- return;
-
- case OFUNC:
- case OCOMMA:
- case OANDAND:
- case OOROR:
- case OCOND:
- case ODOT:
- break;
- }
-
- switch(o) {
- default:
- diag(n, "unknown op in cgen: %O", o);
- break;
-
- case OAS:
- if(l->op == OBIT)
- goto bitas;
- if(l->addable >= INDEXED && l->complex < FNX) {
- if(nn != Z || r->addable < INDEXED) {
- if(r->complex >= FNX && nn == Z)
- regret(&nod, r, 0, 0);
- else
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- gmove(&nod, l);
- if(nn != Z)
- gmove(&nod, nn);
- regfree(&nod);
- } else
- gmove(r, l);
- break;
- }
- if(l->complex >= r->complex) {
- reglcgen(&nod1, l, Z);
- if(r->addable >= INDEXED) {
- gmove(r, &nod1);
- if(nn != Z)
- gmove(r, nn);
- regfree(&nod1);
- break;
- }
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- } else {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- reglcgen(&nod1, l, Z);
- }
- gmove(&nod, &nod1);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- bitas:
- n = l->left;
- regalloc(&nod, r, nn);
- if(l->complex >= r->complex) {
- reglcgen(&nod1, n, Z);
- cgen(r, &nod);
- } else {
- cgen(r, &nod);
- reglcgen(&nod1, n, Z);
- }
- regalloc(&nod2, n, Z);
- gopcode(OAS, &nod1, Z, &nod2);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
-
- case OBIT:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- bitload(n, &nod, Z, Z, nn);
- gopcode(OAS, &nod, Z, nn);
- regfree(&nod);
- break;
-
- case ODIV:
- case OMOD:
- if(nn != Z)
- if((t = vlog(r)) >= 0) {
- /* signed div/mod by constant power of 2 */
- cgen(l, nn);
- gopcode(OGE, nodconst(0), nn, Z);
- p1 = p;
- if(o == ODIV) {
- gopcode(OADD, nodconst((1<<t)-1), Z, nn);
- patch(p1, pc);
- gopcode(OASHR, nodconst(t), Z, nn);
- } else {
- gopcode(OSUB, nn, nodconst(0), nn);
- gopcode(OAND, nodconst((1<<t)-1), Z, nn);
- gopcode(OSUB, nn, nodconst(0), nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- gopcode(OAND, nodconst((1<<t)-1), Z, nn);
- patch(p1, pc);
- }
- break;
- }
- goto muldiv;
-
- case OSUB:
- if(nn != Z)
- if(l->op == OCONST)
- if(!typefd[n->type->etype]) {
- cgen(r, nn);
- gopcode(o, Z, l, nn);
- break;
- }
- case OADD:
- case OAND:
- case OOR:
- case OXOR:
- case OLSHR:
- case OASHL:
- case OASHR:
- /*
- * immediate operands
- */
- if(nn != Z)
- if(r->op == OCONST)
- if(!typefd[n->type->etype]) {
- cgen(l, nn);
- if(r->vconst == 0)
- if(o != OAND)
- break;
- if(nn != Z)
- gopcode(o, r, Z, nn);
- break;
- }
-
- case OLMUL:
- case OLDIV:
- case OLMOD:
- case OMUL:
- muldiv:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(o == OMUL || o == OLMUL) {
- if(mulcon(n, nn))
- break;
- }
- if(l->complex >= r->complex) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(o, &nod1, Z, &nod);
- } else {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- regalloc(&nod1, l, Z);
- cgen(l, &nod1);
- gopcode(o, &nod, &nod1, &nod);
- }
- gopcode(OAS, &nod, Z, nn);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OASLSHR:
- case OASASHL:
- case OASASHR:
- case OASAND:
- case OASADD:
- case OASSUB:
- case OASXOR:
- case OASOR:
- if(l->op == OBIT)
- goto asbitop;
- if(r->op == OCONST)
- if(!typefd[r->type->etype])
- if(!typefd[n->type->etype]) {
- if(l->addable < INDEXED)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- regalloc(&nod, r, nn);
- gopcode(OAS, &nod2, Z, &nod);
- gopcode(o, r, Z, &nod);
- gopcode(OAS, &nod, Z, &nod2);
-
- regfree(&nod);
- if(l->addable < INDEXED)
- regfree(&nod2);
- break;
- }
-
- case OASLMUL:
- case OASLDIV:
- case OASLMOD:
- case OASMUL:
- case OASDIV:
- case OASMOD:
- if(l->op == OBIT)
- goto asbitop;
- if(l->complex >= r->complex) {
- if(l->addable < INDEXED)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- } else {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- if(l->addable < INDEXED)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- }
-
- regalloc(&nod, n, nn);
- gmove(&nod2, &nod);
- gopcode(o, &nod1, Z, &nod);
- gmove(&nod, &nod2);
- if(nn != Z)
- gopcode(OAS, &nod, Z, nn);
- regfree(&nod);
- regfree(&nod1);
- if(l->addable < INDEXED)
- regfree(&nod2);
- break;
-
- asbitop:
- regalloc(&nod4, n, nn);
- if(l->complex >= r->complex) {
- bitload(l, &nod, &nod1, &nod2, &nod4);
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- } else {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- bitload(l, &nod, &nod1, &nod2, &nod4);
- }
- gmove(&nod, &nod4);
- gopcode(o, &nod3, Z, &nod4);
- regfree(&nod3);
- gmove(&nod4, &nod);
- regfree(&nod4);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
-
- case OADDR:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- lcgen(l, nn);
- break;
-
- case OFUNC:
- if(l->complex >= FNX) {
- if(l->op != OIND)
- diag(n, "bad function call");
-
- regret(&nod, l->left, 0, 0);
- cgen(l->left, &nod);
- regsalloc(&nod1, l->left);
- gopcode(OAS, &nod, Z, &nod1);
- regfree(&nod);
-
- nod = *n;
- nod.left = &nod2;
- nod2 = *l;
- nod2.left = &nod1;
- nod2.complex = 1;
- cgen(&nod, nn);
-
- return;
- }
- if(REGARG >= 0)
- o = reg[REGARG];
- gargs(r, &nod, &nod1);
- if(l->addable < INDEXED) {
- reglcgen(&nod, l, Z);
- gopcode(OFUNC, Z, Z, &nod);
- regfree(&nod);
- } else
- gopcode(OFUNC, Z, Z, l);
- if(REGARG >= 0)
- if(o != reg[REGARG])
- reg[REGARG]--;
- regret(&nod, n, l->type, 1);
- if(nn != Z)
- gmove(&nod, nn);
- if(nod.op == OREGISTER)
- regfree(&nod);
- break;
-
- case OIND:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- regialloc(&nod, n, nn);
- r = l;
- while(r->op == OADD)
- r = r->right;
- if(sconst(r) && (v = r->vconst+nod.xoffset) > -4096 && v < 4096) {
- v = r->vconst;
- r->vconst = 0;
- cgen(l, &nod);
- nod.xoffset += v;
- r->vconst = v;
- } else
- cgen(l, &nod);
- regind(&nod, n);
- gopcode(OAS, &nod, Z, nn);
- regfree(&nod);
- break;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OLO:
- case OLS:
- case OHI:
- case OHS:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- boolgen(n, 1, nn);
- break;
-
- case OANDAND:
- case OOROR:
- boolgen(n, 1, nn);
- if(nn == Z)
- patch(p, pc);
- break;
-
- case ONOT:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- boolgen(n, 1, nn);
- break;
-
- case OCOMMA:
- cgen(l, Z);
- cgen(r, nn);
- break;
-
- case OCAST:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- /*
- * convert from types l->n->nn
- */
- if(nocast(l->type, n->type)) {
- if(nocast(n->type, nn->type)) {
- cgen(l, nn);
- break;
- }
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- regalloc(&nod1, n, &nod);
- if(inrel)
- gmover(&nod, &nod1);
- else
- gopcode(OAS, &nod, Z, &nod1);
- gopcode(OAS, &nod1, Z, nn);
- regfree(&nod1);
- regfree(&nod);
- break;
-
- case ODOT:
- sugen(l, nodrat, l->type->width);
- if(nn != Z) {
- warn(n, "non-interruptable temporary");
- nod = *nodrat;
- if(!r || r->op != OCONST) {
- diag(n, "DOT and no offset");
- break;
- }
- nod.xoffset += (int32)r->vconst;
- nod.type = n->type;
- cgen(&nod, nn);
- }
- break;
-
- case OCOND:
- bcgen(l, 1);
- p1 = p;
- cgen(r->left, nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- cgen(r->right, nn);
- patch(p1, pc);
- break;
-
- case OPOSTINC:
- case OPOSTDEC:
- v = 1;
- if(l->type->etype == TIND)
- v = l->type->link->width;
- if(o == OPOSTDEC)
- v = -v;
- if(l->op == OBIT)
- goto bitinc;
- if(nn == Z)
- goto pre;
-
- if(l->addable < INDEXED)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
-
- regalloc(&nod, l, nn);
- gopcode(OAS, &nod2, Z, &nod);
- regalloc(&nod1, l, Z);
- if(typefd[l->type->etype]) {
- regalloc(&nod3, l, Z);
- if(v < 0) {
- gopcode(OAS, nodfconst(-v), Z, &nod3);
- gopcode(OSUB, &nod3, &nod, &nod1);
- } else {
- gopcode(OAS, nodfconst(v), Z, &nod3);
- gopcode(OADD, &nod3, &nod, &nod1);
- }
- regfree(&nod3);
- } else
- gopcode(OADD, nodconst(v), &nod, &nod1);
- gopcode(OAS, &nod1, Z, &nod2);
-
- regfree(&nod);
- regfree(&nod1);
- if(l->addable < INDEXED)
- regfree(&nod2);
- break;
-
- case OPREINC:
- case OPREDEC:
- v = 1;
- if(l->type->etype == TIND)
- v = l->type->link->width;
- if(o == OPREDEC)
- v = -v;
- if(l->op == OBIT)
- goto bitinc;
-
- pre:
- if(l->addable < INDEXED)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
-
- regalloc(&nod, l, nn);
- gopcode(OAS, &nod2, Z, &nod);
- if(typefd[l->type->etype]) {
- regalloc(&nod3, l, Z);
- if(v < 0) {
- gopcode(OAS, nodfconst(-v), Z, &nod3);
- gopcode(OSUB, &nod3, Z, &nod);
- } else {
- gopcode(OAS, nodfconst(v), Z, &nod3);
- gopcode(OADD, &nod3, Z, &nod);
- }
- regfree(&nod3);
- } else
- gopcode(OADD, nodconst(v), Z, &nod);
- gopcode(OAS, &nod, Z, &nod2);
-
- regfree(&nod);
- if(l->addable < INDEXED)
- regfree(&nod2);
- break;
-
- bitinc:
- if(nn != Z && (o == OPOSTINC || o == OPOSTDEC)) {
- bitload(l, &nod, &nod1, &nod2, Z);
- gopcode(OAS, &nod, Z, nn);
- gopcode(OADD, nodconst(v), Z, &nod);
- bitstore(l, &nod, &nod1, &nod2, Z);
- break;
- }
- bitload(l, &nod, &nod1, &nod2, nn);
- gopcode(OADD, nodconst(v), Z, &nod);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
- }
- cursafe = curs;
- return;
-}
-
-void
-cgen(Node *n, Node *nn)
-{
- _cgen(n, nn, 0);
-}
-
-void
-cgenrel(Node *n, Node *nn)
-{
- _cgen(n, nn, 1);
-}
-
-void
-reglcgen(Node *t, Node *n, Node *nn)
-{
- Node *r;
- int32 v;
-
- regialloc(t, n, nn);
- if(n->op == OIND) {
- r = n->left;
- while(r->op == OADD)
- r = r->right;
- if(sconst(r) && (v = r->vconst+t->xoffset) > -4096 && v < 4096) {
- v = r->vconst;
- r->vconst = 0;
- lcgen(n, t);
- t->xoffset += v;
- r->vconst = v;
- regind(t, n);
- return;
- }
- } else if(n->op == OINDREG) {
- if((v = n->xoffset) > -4096 && v < 4096) {
- n->op = OREGISTER;
- cgen(n, t);
- t->xoffset += v;
- n->op = OINDREG;
- regind(t, n);
- return;
- }
- }
- lcgen(n, t);
- regind(t, n);
-}
-
-void
-reglpcgen(Node *n, Node *nn, int f)
-{
- Type *t;
-
- t = nn->type;
- nn->type = types[TLONG];
- if(f)
- reglcgen(n, nn, Z);
- else {
- regialloc(n, nn, Z);
- lcgen(nn, n);
- regind(n, nn);
- }
- nn->type = t;
-}
-
-void
-lcgen(Node *n, Node *nn)
-{
- Prog *p1;
- Node nod;
-
- if(debug['g']) {
- prtree(nn, "lcgen lhs");
- prtree(n, "lcgen");
- }
- if(n == Z || n->type == T)
- return;
- if(nn == Z) {
- nn = &nod;
- regalloc(&nod, n, Z);
- }
- switch(n->op) {
- default:
- if(n->addable < INDEXED) {
- diag(n, "unknown op in lcgen: %O", n->op);
- break;
- }
- nod = *n;
- nod.op = OADDR;
- nod.left = n;
- nod.right = Z;
- nod.type = types[TIND];
- gopcode(OAS, &nod, Z, nn);
- break;
-
- case OCOMMA:
- cgen(n->left, n->left);
- lcgen(n->right, nn);
- break;
-
- case OIND:
- cgen(n->left, nn);
- break;
-
- case OCOND:
- bcgen(n->left, 1);
- p1 = p;
- lcgen(n->right->left, nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- lcgen(n->right->right, nn);
- patch(p1, pc);
- break;
- }
-}
-
-void
-bcgen(Node *n, int true)
-{
-
- if(n->type == T)
- gbranch(OGOTO);
- else
- boolgen(n, true, Z);
-}
-
-void
-boolgen(Node *n, int true, Node *nn)
-{
- int o;
- Prog *p1, *p2;
- Node *l, *r, nod, nod1;
- int32 curs;
-
- if(debug['g']) {
- prtree(nn, "boolgen lhs");
- prtree(n, "boolgen");
- }
- curs = cursafe;
- l = n->left;
- r = n->right;
- switch(n->op) {
-
- default:
- regalloc(&nod, n, nn);
- cgen(n, &nod);
- o = ONE;
- if(true)
- o = comrel[relindex(o)];
- if(typefd[n->type->etype]) {
- gopcode(o, nodfconst(0), &nod, Z);
- } else
- gopcode(o, nodconst(0), &nod, Z);
- regfree(&nod);
- goto com;
-
- case OCONST:
- o = vconst(n);
- if(!true)
- o = !o;
- gbranch(OGOTO);
- if(o) {
- p1 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- }
- goto com;
-
- case OCOMMA:
- cgen(l, Z);
- boolgen(r, true, nn);
- break;
-
- case ONOT:
- boolgen(l, !true, nn);
- break;
-
- case OCOND:
- bcgen(l, 1);
- p1 = p;
- bcgen(r->left, true);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- bcgen(r->right, !true);
- patch(p2, pc);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- patch(p2, pc);
- goto com;
-
- case OANDAND:
- if(!true)
- goto caseor;
-
- caseand:
- bcgen(l, true);
- p1 = p;
- bcgen(r, !true);
- p2 = p;
- patch(p1, pc);
- gbranch(OGOTO);
- patch(p2, pc);
- goto com;
-
- case OOROR:
- if(!true)
- goto caseand;
-
- caseor:
- bcgen(l, !true);
- p1 = p;
- bcgen(r, !true);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- patch(p2, pc);
- goto com;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- o = n->op;
- if(true)
- o = comrel[relindex(o)];
- if(l->complex >= FNX && r->complex >= FNX) {
- regret(&nod, r, 0, 0);
- cgenrel(r, &nod);
- regsalloc(&nod1, r);
- gopcode(OAS, &nod, Z, &nod1);
- regfree(&nod);
- nod = *n;
- nod.right = &nod1;
- boolgen(&nod, true, nn);
- break;
- }
- if(sconst(l)) {
- regalloc(&nod, r, nn);
- cgenrel(r, &nod);
- o = invrel[relindex(o)];
- gopcode(o, l, &nod, Z);
- regfree(&nod);
- goto com;
- }
- if(sconst(r)) {
- regalloc(&nod, l, nn);
- cgenrel(l, &nod);
- gopcode(o, r, &nod, Z);
- regfree(&nod);
- goto com;
- }
- if(l->complex >= r->complex) {
- regalloc(&nod1, l, nn);
- cgenrel(l, &nod1);
- regalloc(&nod, r, Z);
- cgenrel(r, &nod);
- } else {
- regalloc(&nod, r, nn);
- cgenrel(r, &nod);
- regalloc(&nod1, l, Z);
- cgenrel(l, &nod1);
- }
- gopcode(o, &nod, &nod1, Z);
- regfree(&nod);
- regfree(&nod1);
-
- com:
- if(nn != Z) {
- p1 = p;
- gopcode(OAS, nodconst(1), Z, nn);
- gbranch(OGOTO);
- p2 = p;
- patch(p1, pc);
- gopcode(OAS, nodconst(0), Z, nn);
- patch(p2, pc);
- }
- break;
- }
- cursafe = curs;
-}
-
-void
-sugen(Node *n, Node *nn, int32 w)
-{
- Prog *p1;
- Node nod0, nod1, nod2, nod3, nod4, *l, *r;
- Type *t;
- int32 pc1;
- int i, m, c;
-
- if(n == Z || n->type == T)
- return;
- if(debug['g']) {
- prtree(nn, "sugen lhs");
- prtree(n, "sugen");
- }
- if(nn == nodrat)
- if(w > nrathole)
- nrathole = w;
- switch(n->op) {
- case OIND:
- if(nn == Z) {
- nullwarn(n->left, Z);
- break;
- }
-
- default:
- goto copy;
-
- case OCONST:
- if(n->type && typev[n->type->etype]) {
- if(nn == Z) {
- nullwarn(n->left, Z);
- break;
- }
-
- t = nn->type;
- nn->type = types[TLONG];
- reglcgen(&nod1, nn, Z);
- nn->type = t;
-
- if(isbigendian)
- gopcode(OAS, nod32const(n->vconst>>32), Z, &nod1);
- else
- gopcode(OAS, nod32const(n->vconst), Z, &nod1);
- nod1.xoffset += SZ_LONG;
- if(isbigendian)
- gopcode(OAS, nod32const(n->vconst), Z, &nod1);
- else
- gopcode(OAS, nod32const(n->vconst>>32), Z, &nod1);
-
- regfree(&nod1);
- break;
- }
- goto copy;
-
- case ODOT:
- l = n->left;
- sugen(l, nodrat, l->type->width);
- if(nn != Z) {
- warn(n, "non-interruptable temporary");
- nod1 = *nodrat;
- r = n->right;
- if(!r || r->op != OCONST) {
- diag(n, "DOT and no offset");
- break;
- }
- nod1.xoffset += (int32)r->vconst;
- nod1.type = n->type;
- sugen(&nod1, nn, w);
- }
- break;
-
- case OSTRUCT:
- /*
- * rewrite so lhs has no side effect.
- */
- if(nn != Z && side(nn)) {
- nod1 = *n;
- nod1.type = typ(TIND, n->type);
- regret(&nod2, &nod1, 0, 0);
- lcgen(nn, &nod2);
- regsalloc(&nod0, &nod1);
- gopcode(OAS, &nod2, Z, &nod0);
- regfree(&nod2);
-
- nod1 = *n;
- nod1.op = OIND;
- nod1.left = &nod0;
- nod1.right = Z;
- nod1.complex = 1;
-
- sugen(n, &nod1, w);
- return;
- }
-
- r = n->left;
- for(t = n->type->link; t != T; t = t->down) {
- l = r;
- if(r->op == OLIST) {
- l = r->left;
- r = r->right;
- }
- if(nn == Z) {
- cgen(l, nn);
- continue;
- }
- /*
- * hand craft *(&nn + o) = l
- */
- nod0 = znode;
- nod0.op = OAS;
- nod0.type = t;
- nod0.left = &nod1;
- nod0.right = l;
-
- nod1 = znode;
- nod1.op = OIND;
- nod1.type = t;
- nod1.left = &nod2;
-
- nod2 = znode;
- nod2.op = OADD;
- nod2.type = typ(TIND, t);
- nod2.left = &nod3;
- nod2.right = &nod4;
-
- nod3 = znode;
- nod3.op = OADDR;
- nod3.type = nod2.type;
- nod3.left = nn;
-
- nod4 = znode;
- nod4.op = OCONST;
- nod4.type = nod2.type;
- nod4.vconst = t->offset;
-
- ccom(&nod0);
- acom(&nod0);
- xcom(&nod0);
- nod0.addable = 0;
-
- cgen(&nod0, Z);
- }
- break;
-
- case OAS:
- if(nn == Z) {
- if(n->addable < INDEXED)
- sugen(n->right, n->left, w);
- break;
- }
- sugen(n->right, nodrat, w);
- warn(n, "non-interruptable temporary");
- sugen(nodrat, n->left, w);
- sugen(nodrat, nn, w);
- break;
-
- case OFUNC:
- if(!hasdotdotdot(n->left->type)) {
- cgen(n, Z);
- if(nn != Z) {
- curarg -= n->type->width;
- regret(&nod1, n, n->left->type, 1);
- if(nn->complex >= FNX) {
- regsalloc(&nod2, n);
- cgen(&nod1, &nod2);
- nod1 = nod2;
- }
- cgen(&nod1, nn);
- }
- break;
- }
- if(nn == Z) {
- sugen(n, nodrat, w);
- break;
- }
- if(nn->op != OIND) {
- nn = new1(OADDR, nn, Z);
- nn->type = types[TIND];
- nn->addable = 0;
- } else
- nn = nn->left;
- n = new(OFUNC, n->left, new(OLIST, nn, n->right));
- n->type = types[TVOID];
- n->left->type = types[TVOID];
- cgen(n, Z);
- break;
-
- case OCOND:
- bcgen(n->left, 1);
- p1 = p;
- sugen(n->right->left, nn, w);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- sugen(n->right->right, nn, w);
- patch(p1, pc);
- break;
-
- case OCOMMA:
- cgen(n->left, Z);
- sugen(n->right, nn, w);
- break;
- }
- return;
-
-copy:
- if(nn == Z)
- return;
- if(n->complex >= FNX && nn->complex >= FNX) {
- t = nn->type;
- nn->type = types[TLONG];
- regialloc(&nod1, nn, Z);
- lcgen(nn, &nod1);
- regsalloc(&nod2, nn);
- nn->type = t;
-
- gopcode(OAS, &nod1, Z, &nod2);
- regfree(&nod1);
-
- nod2.type = typ(TIND, t);
-
- nod1 = nod2;
- nod1.op = OIND;
- nod1.left = &nod2;
- nod1.right = Z;
- nod1.complex = 1;
- nod1.type = t;
-
- sugen(n, &nod1, w);
- return;
- }
-
- w /= SZ_LONG;
- if(w <= 2) {
- if(n->complex > nn->complex) {
- reglpcgen(&nod1, n, 1);
- reglpcgen(&nod2, nn, 1);
- } else {
- reglpcgen(&nod2, nn, 1);
- reglpcgen(&nod1, n, 1);
- }
- regalloc(&nod3, &regnode, Z);
- regalloc(&nod4, &regnode, Z);
- nod0 = *nodconst((1<<nod3.reg)|(1<<nod4.reg));
- if(w == 2 && nod1.xoffset == 0)
- gmovm(&nod1, &nod0, 0);
- else {
- gmove(&nod1, &nod3);
- if(w == 2) {
- nod1.xoffset += SZ_LONG;
- gmove(&nod1, &nod4);
- }
- }
- if(w == 2 && nod2.xoffset == 0)
- gmovm(&nod0, &nod2, 0);
- else {
- gmove(&nod3, &nod2);
- if(w == 2) {
- nod2.xoffset += SZ_LONG;
- gmove(&nod4, &nod2);
- }
- }
- regfree(&nod1);
- regfree(&nod2);
- regfree(&nod3);
- regfree(&nod4);
- return;
- }
-
- if(n->complex > nn->complex) {
- reglpcgen(&nod1, n, 0);
- reglpcgen(&nod2, nn, 0);
- } else {
- reglpcgen(&nod2, nn, 0);
- reglpcgen(&nod1, n, 0);
- }
-
- m = 0;
- for(c = 0; c < w && c < 4; c++) {
- i = tmpreg();
- if (i == 0)
- break;
- reg[i]++;
- m |= 1<<i;
- }
- nod4 = *(nodconst(m));
- if(w < 3*c) {
- for (; w>c; w-=c) {
- gmovm(&nod1, &nod4, 1);
- gmovm(&nod4, &nod2, 1);
- }
- goto out;
- }
-
- regalloc(&nod3, &regnode, Z);
- gopcode(OAS, nodconst(w/c), Z, &nod3);
- w %= c;
-
- pc1 = pc;
- gmovm(&nod1, &nod4, 1);
- gmovm(&nod4, &nod2, 1);
-
- gopcode(OSUB, nodconst(1), Z, &nod3);
- gopcode(OEQ, nodconst(0), &nod3, Z);
- p->as = ABGT;
- patch(p, pc1);
- regfree(&nod3);
-
-out:
- if (w) {
- i = 0;
- while (c>w) {
- while ((m&(1<<i)) == 0)
- i++;
- m &= ~(1<<i);
- reg[i] = 0;
- c--;
- i++;
- }
- nod4.vconst = m;
- gmovm(&nod1, &nod4, 0);
- gmovm(&nod4, &nod2, 0);
- }
- i = 0;
- do {
- while ((m&(1<<i)) == 0)
- i++;
- reg[i] = 0;
- c--;
- i++;
- } while (c>0);
- regfree(&nod1);
- regfree(&nod2);
-}
diff --git a/src/cmd/5c/doc.go b/src/cmd/5c/doc.go
deleted file mode 100644
index 7291d45f4..000000000
--- a/src/cmd/5c/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-
-5c is a version of the Plan 9 C compiler. The original is documented at
-
- http://plan9.bell-labs.com/magic/man2html/1/8c
-
-Its target architecture is the ARM, referred to by these tools as arm.
-
-*/
-package main
diff --git a/src/cmd/5c/gc.h b/src/cmd/5c/gc.h
deleted file mode 100644
index 7417b7dbe..000000000
--- a/src/cmd/5c/gc.h
+++ /dev/null
@@ -1,333 +0,0 @@
-// Inferno utils/5c/gc.h
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/gc.h
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "../cc/cc.h"
-#include "../5l/5.out.h"
-
-/*
- * 5c/arm
- * Arm 7500
- */
-#define SZ_CHAR 1
-#define SZ_SHORT 2
-#define SZ_INT 4
-#define SZ_LONG 4
-#define SZ_IND 4
-#define SZ_FLOAT 4
-#define SZ_VLONG 8
-#define SZ_DOUBLE 8
-#define FNX 100
-
-typedef struct Case Case;
-typedef struct C1 C1;
-typedef struct Multab Multab;
-typedef struct Hintab Hintab;
-typedef struct Reg Reg;
-typedef struct Rgn Rgn;
-
-
-#define R0ISZERO 0
-
-#define A ((Addr*)0)
-
-#define INDEXED 9
-#define P ((Prog*)0)
-
-struct Case
-{
- Case* link;
- int32 val;
- int32 label;
- char def;
- char isv;
-};
-#define C ((Case*)0)
-
-struct C1
-{
- int32 val;
- int32 label;
-};
-
-struct Multab
-{
- int32 val;
- char code[20];
-};
-
-struct Hintab
-{
- ushort val;
- char hint[10];
-};
-
-struct Reg
-{
- int32 pc;
- int32 rpo; /* reverse post ordering */
-
- Bits set;
- Bits use1;
- Bits use2;
-
- Bits refbehind;
- Bits refahead;
- Bits calbehind;
- Bits calahead;
- Bits regdiff;
- Bits act;
-
- int32 regu;
- int32 loop; /* could be shorter */
-
-
- Reg* log5;
- int32 active;
-
- Reg* p1;
- Reg* p2;
- Reg* p2link;
- Reg* s1;
- Reg* s2;
- Reg* link;
- Prog* prog;
-};
-#define R ((Reg*)0)
-
-#define NRGN 600
-struct Rgn
-{
- Reg* enter;
- short cost;
- short varno;
- short regno;
-};
-
-EXTERN int32 breakpc;
-EXTERN int32 nbreak;
-EXTERN Case* cases;
-EXTERN Node constnode;
-EXTERN Node fconstnode;
-EXTERN int32 continpc;
-EXTERN int32 curarg;
-EXTERN int32 cursafe;
-EXTERN int32 isbigendian;
-EXTERN Prog* lastp;
-EXTERN int32 maxargsafe;
-EXTERN int mnstring;
-EXTERN Multab multab[20];
-extern int hintabsize;
-EXTERN Node* nodrat;
-EXTERN Node* nodret;
-EXTERN Node* nodsafe;
-EXTERN int32 nrathole;
-EXTERN int32 nstring;
-EXTERN Prog* p;
-EXTERN int32 pc;
-EXTERN Node regnode;
-EXTERN char string[NSNAME];
-EXTERN Sym* symrathole;
-EXTERN Node znode;
-EXTERN Prog zprog;
-EXTERN char reg[NREG+NFREG];
-EXTERN int32 exregoffset;
-EXTERN int32 exfregoffset;
-EXTERN int suppress;
-
-#define BLOAD(r) band(bnot(r->refbehind), r->refahead)
-#define BSTORE(r) band(bnot(r->calbehind), r->calahead)
-#define LOAD(r) (~r->refbehind.b[z] & r->refahead.b[z])
-#define STORE(r) (~r->calbehind.b[z] & r->calahead.b[z])
-
-#define bset(a,n) ((a).b[(n)/32]&(1L<<(n)%32))
-
-#define CLOAD 4
-#define CREF 5
-#define CINF 1000
-#define LOOP 3
-
-EXTERN Rgn region[NRGN];
-EXTERN Rgn* rgp;
-EXTERN int nregion;
-EXTERN int nvar;
-
-EXTERN Bits externs;
-EXTERN Bits params;
-EXTERN Bits consts;
-EXTERN Bits addrs;
-
-EXTERN int32 regbits;
-EXTERN int32 exregbits;
-
-EXTERN int change;
-
-EXTERN Reg* firstr;
-EXTERN Reg* lastr;
-EXTERN Reg zreg;
-EXTERN Reg* freer;
-EXTERN int32* idom;
-EXTERN Reg** rpo2r;
-EXTERN int32 maxnr;
-
-extern char* anames[];
-extern Hintab hintab[];
-
-/*
- * sgen.c
- */
-void codgen(Node*, Node*);
-void gen(Node*);
-void noretval(int);
-void usedset(Node*, int);
-void xcom(Node*);
-int bcomplex(Node*, Node*);
-Prog* gtext(Sym*, int32);
-vlong argsize(int);
-
-/*
- * cgen.c
- */
-void cgen(Node*, Node*);
-void reglcgen(Node*, Node*, Node*);
-void lcgen(Node*, Node*);
-void bcgen(Node*, int);
-void boolgen(Node*, int, Node*);
-void sugen(Node*, Node*, int32);
-void layout(Node*, Node*, int, int, Node*);
-void cgenrel(Node*, Node*);
-
-/*
- * txt.c
- */
-void ginit(void);
-void gclean(void);
-void nextpc(void);
-void gargs(Node*, Node*, Node*);
-void garg1(Node*, Node*, Node*, int, Node**);
-Node* nodconst(int32);
-Node* nod32const(vlong);
-Node* nodfconst(double);
-void nodreg(Node*, Node*, int);
-void regret(Node*, Node*, Type*, int);
-int tmpreg(void);
-void regalloc(Node*, Node*, Node*);
-void regfree(Node*);
-void regialloc(Node*, Node*, Node*);
-void regsalloc(Node*, Node*);
-void regaalloc1(Node*, Node*);
-void regaalloc(Node*, Node*);
-void regind(Node*, Node*);
-void gprep(Node*, Node*);
-void raddr(Node*, Prog*);
-void naddr(Node*, Addr*);
-void gmovm(Node*, Node*, int);
-void gmove(Node*, Node*);
-void gmover(Node*, Node*);
-void gins(int a, Node*, Node*);
-void gopcode(int, Node*, Node*, Node*);
-int samaddr(Node*, Node*);
-void gbranch(int);
-void patch(Prog*, int32);
-int sconst(Node*);
-int sval(int32);
-void gpseudo(int, Sym*, Node*);
-void gprefetch(Node*);
-void gpcdata(int, int);
-
-/*
- * swt.c
- */
-int swcmp(const void*, const void*);
-void doswit(Node*);
-void swit1(C1*, int, int32, Node*);
-void swit2(C1*, int, int32, Node*);
-void newcase(void);
-void bitload(Node*, Node*, Node*, Node*, Node*);
-void bitstore(Node*, Node*, Node*, Node*, Node*);
-int mulcon(Node*, Node*);
-Multab* mulcon0(int32);
-void nullwarn(Node*, Node*);
-void outcode(void);
-
-/*
- * list
- */
-void listinit(void);
-
-/*
- * reg.c
- */
-Reg* rega(void);
-int rcmp(const void*, const void*);
-void regopt(Prog*);
-void addmove(Reg*, int, int, int);
-Bits mkvar(Addr*, int);
-void prop(Reg*, Bits, Bits);
-void loopit(Reg*, int32);
-void synch(Reg*, Bits);
-uint32 allreg(uint32, Rgn*);
-void paint1(Reg*, int);
-uint32 paint2(Reg*, int);
-void paint3(Reg*, int, int32, int);
-void addreg(Addr*, int);
-
-/*
- * peep.c
- */
-void peep(void);
-void excise(Reg*);
-Reg* uniqp(Reg*);
-Reg* uniqs(Reg*);
-int regtyp(Addr*);
-int regzer(Addr*);
-int anyvar(Addr*);
-int subprop(Reg*);
-int copyprop(Reg*);
-int shiftprop(Reg*);
-void constprop(Addr*, Addr*, Reg*);
-int copy1(Addr*, Addr*, Reg*, int);
-int copyu(Prog*, Addr*, Addr*);
-
-int copyas(Addr*, Addr*);
-int copyau(Addr*, Addr*);
-int copyau1(Prog*, Addr*);
-int copysub(Addr*, Addr*, Addr*, int);
-int copysub1(Prog*, Addr*, Addr*, int);
-
-int32 RtoB(int);
-int32 FtoB(int);
-int BtoR(int32);
-int BtoF(int32);
-
-void predicate(void);
-int isbranch(Prog *);
-int predicable(Prog *p);
-int modifiescpsr(Prog *p);
diff --git a/src/cmd/5c/list.c b/src/cmd/5c/list.c
deleted file mode 100644
index 98da424de..000000000
--- a/src/cmd/5c/list.c
+++ /dev/null
@@ -1,39 +0,0 @@
-// Inferno utils/5c/list.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/list.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-
-#define EXTERN
-#include "gc.h"
-
-void
-listinit(void)
-{
- listinit5();
-}
diff --git a/src/cmd/5c/mul.c b/src/cmd/5c/mul.c
deleted file mode 100644
index ff50c4845..000000000
--- a/src/cmd/5c/mul.c
+++ /dev/null
@@ -1,640 +0,0 @@
-// Inferno utils/5c/mul.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/mul.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-
-#include "gc.h"
-
-/*
- * code sequences for multiply by constant.
- * [a-l][0-3]
- * lsl $(A-'a'),r0,r1
- * [+][0-7]
- * add r0,r1,r2
- * [-][0-7]
- * sub r0,r1,r2
- */
-
-static int maxmulops = 3; /* max # of ops to replace mul with */
-static int multabp;
-static int32 mulval;
-static char* mulcp;
-static int32 valmax;
-static int shmax;
-
-static int docode(char *hp, char *cp, int r0, int r1);
-static int gen1(int len);
-static int gen2(int len, int32 r1);
-static int gen3(int len, int32 r0, int32 r1, int flag);
-enum
-{
- SR1 = 1<<0, /* r1 has been shifted */
- SR0 = 1<<1, /* r0 has been shifted */
- UR1 = 1<<2, /* r1 has not been used */
- UR0 = 1<<3, /* r0 has not been used */
-};
-
-Multab*
-mulcon0(int32 v)
-{
- int a1, a2, g;
- Multab *m, *m1;
- char hint[10];
-
- if(v < 0)
- v = -v;
-
- /*
- * look in cache
- */
- m = multab;
- for(g=0; g<nelem(multab); g++) {
- if(m->val == v) {
- if(m->code[0] == 0)
- return 0;
- return m;
- }
- m++;
- }
-
- /*
- * select a spot in cache to overwrite
- */
- multabp++;
- if(multabp < 0 || multabp >= nelem(multab))
- multabp = 0;
- m = multab+multabp;
- m->val = v;
- mulval = v;
-
- /*
- * look in execption hint table
- */
- a1 = 0;
- a2 = hintabsize;
- for(;;) {
- if(a1 >= a2)
- goto no;
- g = (a2 + a1)/2;
- if(v < hintab[g].val) {
- a2 = g;
- continue;
- }
- if(v > hintab[g].val) {
- a1 = g+1;
- continue;
- }
- break;
- }
-
- if(docode(hintab[g].hint, m->code, 1, 0))
- return m;
- print("multiply table failure %d\n", v);
- m->code[0] = 0;
- return 0;
-
-no:
- /*
- * try to search
- */
- hint[0] = 0;
- for(g=1; g<=maxmulops; g++) {
- if(g >= maxmulops && v >= 65535)
- break;
- mulcp = hint+g;
- *mulcp = 0;
- if(gen1(g)) {
- if(docode(hint, m->code, 1, 0))
- return m;
- print("multiply table failure %d\n", v);
- break;
- }
- }
-
- /*
- * try a recur followed by a shift
- */
- g = 0;
- while(!(v & 1)) {
- g++;
- v >>= 1;
- }
- if(g) {
- m1 = mulcon0(v);
- if(m1) {
- strcpy(m->code, m1->code);
- sprint(strchr(m->code, 0), "%c0", g+'a');
- return m;
- }
- }
- m->code[0] = 0;
- return 0;
-}
-
-static int
-docode(char *hp, char *cp, int r0, int r1)
-{
- int c, i;
-
- c = *hp++;
- *cp = c;
- cp += 2;
- switch(c) {
- default:
- c -= 'a';
- if(c < 1 || c >= 30)
- break;
- for(i=0; i<4; i++) {
- switch(i) {
- case 0:
- if(docode(hp, cp, r0<<c, r1))
- goto out;
- break;
- case 1:
- if(docode(hp, cp, r1<<c, r1))
- goto out;
- break;
- case 2:
- if(docode(hp, cp, r0, r0<<c))
- goto out;
- break;
- case 3:
- if(docode(hp, cp, r0, r1<<c))
- goto out;
- break;
- }
- }
- break;
-
- case '+':
- for(i=0; i<8; i++) {
- cp[-1] = i+'0';
- switch(i) {
- case 1:
- if(docode(hp, cp, r0+r1, r1))
- goto out;
- break;
- case 5:
- if(docode(hp, cp, r0, r0+r1))
- goto out;
- break;
- }
- }
- break;
-
- case '-':
- for(i=0; i<8; i++) {
- cp[-1] = i+'0';
- switch(i) {
- case 1:
- if(docode(hp, cp, r0-r1, r1))
- goto out;
- break;
- case 2:
- if(docode(hp, cp, r1-r0, r1))
- goto out;
- break;
- case 5:
- if(docode(hp, cp, r0, r0-r1))
- goto out;
- break;
- case 6:
- if(docode(hp, cp, r0, r1-r0))
- goto out;
- break;
- }
- }
- break;
-
- case 0:
- if(r0 == mulval)
- return 1;
- }
- return 0;
-
-out:
- cp[-1] = i+'0';
- return 1;
-}
-
-static int
-gen1(int len)
-{
- int i;
-
- for(shmax=1; shmax<30; shmax++) {
- valmax = 1<<shmax;
- if(valmax >= mulval)
- break;
- }
- if(mulval == 1)
- return 1;
-
- len--;
- for(i=1; i<=shmax; i++)
- if(gen2(len, 1<<i)) {
- *--mulcp = 'a'+i;
- return 1;
- }
- return 0;
-}
-
-static int
-gen2(int len, int32 r1)
-{
- int i;
-
- if(len <= 0) {
- if(r1 == mulval)
- return 1;
- return 0;
- }
-
- len--;
- if(len == 0)
- goto calcr0;
-
- if(gen3(len, r1, r1+1, UR1)) {
- i = '+';
- goto out;
- }
- if(gen3(len, r1-1, r1, UR0)) {
- i = '-';
- goto out;
- }
- if(gen3(len, 1, r1+1, UR1)) {
- i = '+';
- goto out;
- }
- if(gen3(len, 1, r1-1, UR1)) {
- i = '-';
- goto out;
- }
-
- return 0;
-
-calcr0:
- if(mulval == r1+1) {
- i = '+';
- goto out;
- }
- if(mulval == r1-1) {
- i = '-';
- goto out;
- }
- return 0;
-
-out:
- *--mulcp = i;
- return 1;
-}
-
-static int
-gen3(int len, int32 r0, int32 r1, int flag)
-{
- int i, f1, f2;
- int32 x;
-
- if(r0 <= 0 ||
- r0 >= r1 ||
- r1 > valmax)
- return 0;
-
- len--;
- if(len == 0)
- goto calcr0;
-
- if(!(flag & UR1)) {
- f1 = UR1|SR1;
- for(i=1; i<=shmax; i++) {
- x = r0<<i;
- if(x > valmax)
- break;
- if(gen3(len, r0, x, f1)) {
- i += 'a';
- goto out;
- }
- }
- }
-
- if(!(flag & UR0)) {
- f1 = UR1|SR1;
- for(i=1; i<=shmax; i++) {
- x = r1<<i;
- if(x > valmax)
- break;
- if(gen3(len, r1, x, f1)) {
- i += 'a';
- goto out;
- }
- }
- }
-
- if(!(flag & SR1)) {
- f1 = UR1|SR1|(flag&UR0);
- for(i=1; i<=shmax; i++) {
- x = r1<<i;
- if(x > valmax)
- break;
- if(gen3(len, r0, x, f1)) {
- i += 'a';
- goto out;
- }
- }
- }
-
- if(!(flag & SR0)) {
- f1 = UR0|SR0|(flag&(SR1|UR1));
-
- f2 = UR1|SR1;
- if(flag & UR1)
- f2 |= UR0;
- if(flag & SR1)
- f2 |= SR0;
-
- for(i=1; i<=shmax; i++) {
- x = r0<<i;
- if(x > valmax)
- break;
- if(x > r1) {
- if(gen3(len, r1, x, f2)) {
- i += 'a';
- goto out;
- }
- } else
- if(gen3(len, x, r1, f1)) {
- i += 'a';
- goto out;
- }
- }
- }
-
- x = r1+r0;
- if(gen3(len, r0, x, UR1)) {
- i = '+';
- goto out;
- }
-
- if(gen3(len, r1, x, UR1)) {
- i = '+';
- goto out;
- }
-
- x = r1-r0;
- if(gen3(len, x, r1, UR0)) {
- i = '-';
- goto out;
- }
-
- if(x > r0) {
- if(gen3(len, r0, x, UR1)) {
- i = '-';
- goto out;
- }
- } else
- if(gen3(len, x, r0, UR0)) {
- i = '-';
- goto out;
- }
-
- return 0;
-
-calcr0:
- f1 = flag & (UR0|UR1);
- if(f1 == UR1) {
- for(i=1; i<=shmax; i++) {
- x = r1<<i;
- if(x >= mulval) {
- if(x == mulval) {
- i += 'a';
- goto out;
- }
- break;
- }
- }
- }
-
- if(mulval == r1+r0) {
- i = '+';
- goto out;
- }
- if(mulval == r1-r0) {
- i = '-';
- goto out;
- }
-
- return 0;
-
-out:
- *--mulcp = i;
- return 1;
-}
-
-/*
- * hint table has numbers that
- * the search algorithm fails on.
- * <1000:
- * all numbers
- * <5000:
- * ÷ by 5
- * <10000:
- * ÷ by 50
- * <65536:
- * ÷ by 250
- */
-Hintab hintab[] =
-{
- 683, "b++d+e+",
- 687, "b+e++e-",
- 691, "b++d+e+",
- 731, "b++d+e+",
- 811, "b++d+i+",
- 821, "b++e+e+",
- 843, "b+d++e+",
- 851, "b+f-+e-",
- 853, "b++e+e+",
- 877, "c++++g-",
- 933, "b+c++g-",
- 981, "c-+e-d+",
- 1375, "b+c+b+h-",
- 1675, "d+b++h+",
- 2425, "c++f-e+",
- 2675, "c+d++f-",
- 2750, "b+d-b+h-",
- 2775, "c-+g-e-",
- 3125, "b++e+g+",
- 3275, "b+c+g+e+",
- 3350, "c++++i+",
- 3475, "c-+e-f-",
- 3525, "c-+d+g-",
- 3625, "c-+e-j+",
- 3675, "b+d+d+e+",
- 3725, "b+d-+h+",
- 3925, "b+d+f-d-",
- 4275, "b+g++e+",
- 4325, "b+h-+d+",
- 4425, "b+b+g-j-",
- 4525, "b+d-d+f+",
- 4675, "c++d-g+",
- 4775, "b+d+b+g-",
- 4825, "c+c-+i-",
- 4850, "c++++i-",
- 4925, "b++e-g-",
- 4975, "c+f++e-",
- 5500, "b+g-c+d+",
- 6700, "d+b++i+",
- 9700, "d++++j-",
- 11000, "b+f-c-h-",
- 11750, "b+d+g+j-",
- 12500, "b+c+e-k+",
- 13250, "b+d+e-f+",
- 13750, "b+h-c-d+",
- 14250, "b+g-c+e-",
- 14500, "c+f+j-d-",
- 14750, "d-g--f+",
- 16750, "b+e-d-n+",
- 17750, "c+h-b+e+",
- 18250, "d+b+h-d+",
- 18750, "b+g-++f+",
- 19250, "b+e+b+h+",
- 19750, "b++h--f-",
- 20250, "b+e-l-c+",
- 20750, "c++bi+e-",
- 21250, "b+i+l+c+",
- 22000, "b+e+d-g-",
- 22250, "b+d-h+k-",
- 22750, "b+d-e-g+",
- 23250, "b+c+h+e-",
- 23500, "b+g-c-g-",
- 23750, "b+g-b+h-",
- 24250, "c++g+m-",
- 24750, "b+e+e+j-",
- 25000, "b++dh+g+",
- 25250, "b+e+d-g-",
- 25750, "b+e+b+j+",
- 26250, "b+h+c+e+",
- 26500, "b+h+c+g+",
- 26750, "b+d+e+g-",
- 27250, "b+e+e+f+",
- 27500, "c-i-c-d+",
- 27750, "b+bd++j+",
- 28250, "d-d-++i-",
- 28500, "c+c-h-e-",
- 29000, "b+g-d-f+",
- 29500, "c+h+++e-",
- 29750, "b+g+f-c+",
- 30250, "b+f-g-c+",
- 33500, "c-f-d-n+",
- 33750, "b+d-b+j-",
- 34250, "c+e+++i+",
- 35250, "e+b+d+k+",
- 35500, "c+e+d-g-",
- 35750, "c+i-++e+",
- 36250, "b+bh-d+e+",
- 36500, "c+c-h-e-",
- 36750, "d+e--i+",
- 37250, "b+g+g+b+",
- 37500, "b+h-b+f+",
- 37750, "c+be++j-",
- 38500, "b+e+b+i+",
- 38750, "d+i-b+d+",
- 39250, "b+g-l-+d+",
- 39500, "b+g-c+g-",
- 39750, "b+bh-c+f-",
- 40250, "b+bf+d+g-",
- 40500, "b+g-c+g+",
- 40750, "c+b+i-e+",
- 41250, "d++bf+h+",
- 41500, "b+j+c+d-",
- 41750, "c+f+b+h-",
- 42500, "c+h++g+",
- 42750, "b+g+d-f-",
- 43250, "b+l-e+d-",
- 43750, "c+bd+h+f-",
- 44000, "b+f+g-d-",
- 44250, "b+d-g--f+",
- 44500, "c+e+c+h+",
- 44750, "b+e+d-h-",
- 45250, "b++g+j-g+",
- 45500, "c+d+e-g+",
- 45750, "b+d-h-e-",
- 46250, "c+bd++j+",
- 46500, "b+d-c-j-",
- 46750, "e-e-b+g-",
- 47000, "b+c+d-j-",
- 47250, "b+e+e-g-",
- 47500, "b+g-c-h-",
- 47750, "b+f-c+h-",
- 48250, "d--h+n-",
- 48500, "b+c-g+m-",
- 48750, "b+e+e-g+",
- 49500, "c-f+e+j-",
- 49750, "c+c+g++f-",
- 50000, "b+e+e+k+",
- 50250, "b++i++g+",
- 50500, "c+g+f-i+",
- 50750, "b+e+d+k-",
- 51500, "b+i+c-f+",
- 51750, "b+bd+g-e-",
- 52250, "b+d+g-j+",
- 52500, "c+c+f+g+",
- 52750, "b+c+e+i+",
- 53000, "b+i+c+g+",
- 53500, "c+g+g-n+",
- 53750, "b+j+d-c+",
- 54250, "b+d-g-j-",
- 54500, "c-f+e+f+",
- 54750, "b+f-+c+g+",
- 55000, "b+g-d-g-",
- 55250, "b+e+e+g+",
- 55500, "b+cd++j+",
- 55750, "b+bh-d-f-",
- 56250, "c+d-b+j-",
- 56500, "c+d+c+i+",
- 56750, "b+e+d++h-",
- 57000, "b+d+g-f+",
- 57250, "b+f-m+d-",
- 57750, "b+i+c+e-",
- 58000, "b+e+d+h+",
- 58250, "c+b+g+g+",
- 58750, "d-e-j--e+",
- 59000, "d-i-+e+",
- 59250, "e--h-m+",
- 59500, "c+c-h+f-",
- 59750, "b+bh-e+i-",
- 60250, "b+bh-e-e-",
- 60500, "c+c-g-g-",
- 60750, "b+e-l-e-",
- 61250, "b+g-g-c+",
- 61750, "b+g-c+g+",
- 62250, "f--+c-i-",
- 62750, "e+f--+g+",
- 64750, "b+f+d+p-",
-};
-int hintabsize = nelem(hintab);
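A note on the deleted mul.c above: the gen1/gen2/gen3 search tries to express multiplication by a constant as a short program of shifts, adds, and subtracts, and hintab records the recipes the bounded search cannot find on its own (the 'a'+i / '+' / '-' string encoding is interpreted elsewhere in mul.c, which is not shown here). As a minimal standalone sketch of the underlying strength-reduction idea only — not the 5c encoding — a constant multiply decomposes like this:

#include <stdio.h>
#include <stdint.h>

/* Strength-reduce a multiply by 10 into shifts and adds: 10 = 8 + 2. */
static uint32_t mul10(uint32_t x)
{
	return (x << 3) + (x << 1);
}

int main(void)
{
	uint32_t x;

	for(x = 0; x <= 5; x++)
		printf("%u*10 = %u (shift/add: %u)\n", x, x*10, mul10(x));
	return 0;
}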
diff --git a/src/cmd/5c/peep.c b/src/cmd/5c/peep.c
deleted file mode 100644
index 1de56b594..000000000
--- a/src/cmd/5c/peep.c
+++ /dev/null
@@ -1,1478 +0,0 @@
-// Inferno utils/5c/peep.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/peep.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-
-#include "gc.h"
-
-int xtramodes(Reg*, Addr*);
-
-void
-peep(void)
-{
- Reg *r, *r1, *r2;
- Prog *p, *p1;
- int t;
-/*
- * complete R structure
- */
- t = 0;
- for(r=firstr; r!=R; r=r1) {
- r1 = r->link;
- if(r1 == R)
- break;
- p = r->prog->link;
- while(p != r1->prog)
- switch(p->as) {
- default:
- r2 = rega();
- r->link = r2;
- r2->link = r1;
-
- r2->prog = p;
- r2->p1 = r;
- r->s1 = r2;
- r2->s1 = r1;
- r1->p1 = r2;
-
- r = r2;
- t++;
-
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- p = p->link;
- }
- }
-
-loop1:
- t = 0;
- for(r=firstr; r!=R; r=r->link) {
- p = r->prog;
- if(p->as == ASLL || p->as == ASRL || p->as == ASRA) {
- /*
- * elide shift into D_SHIFT operand of subsequent instruction
- */
- if(shiftprop(r)) {
- excise(r);
- t++;
- }
- }
- if(p->as == AMOVW || p->as == AMOVF || p->as == AMOVD)
- if(regtyp(&p->to)) {
- if(p->from.type == D_CONST)
- constprop(&p->from, &p->to, r->s1);
- else if(regtyp(&p->from))
- if(p->from.type == p->to.type) {
- if(copyprop(r)) {
- excise(r);
- t++;
- } else
- if(subprop(r) && copyprop(r)) {
- excise(r);
- t++;
- }
- }
- }
- }
- if(t)
- goto loop1;
- /*
- * look for MOVB x,R; MOVB R,R
- */
- for(r=firstr; r!=R; r=r->link) {
- p = r->prog;
- switch(p->as) {
- default:
- continue;
- case AEOR:
- /*
- * EOR -1,x,y => MVN x,y
- */
- if(p->from.type == D_CONST && p->from.offset == -1) {
- p->as = AMVN;
- p->from.type = D_REG;
- if(p->reg != NREG)
- p->from.reg = p->reg;
- else
- p->from.reg = p->to.reg;
- p->reg = NREG;
- }
- continue;
- case AMOVH:
- case AMOVHS:
- case AMOVHU:
- case AMOVB:
- case AMOVBS:
- case AMOVBU:
- if(p->to.type != D_REG)
- continue;
- break;
- }
- r1 = r->link;
- if(r1 == R)
- continue;
- p1 = r1->prog;
- if(p1->as != p->as)
- continue;
- if(p1->from.type != D_REG || p1->from.reg != p->to.reg)
- continue;
- if(p1->to.type != D_REG || p1->to.reg != p->to.reg)
- continue;
- excise(r1);
- }
-
- for(r=firstr; r!=R; r=r->link) {
- p = r->prog;
- switch(p->as) {
- case AMOVW:
- case AMOVB:
- case AMOVBS:
- case AMOVBU:
- if(p->from.type == D_OREG && p->from.offset == 0)
- xtramodes(r, &p->from);
- else if(p->to.type == D_OREG && p->to.offset == 0)
- xtramodes(r, &p->to);
- else
- continue;
- break;
- case ACMP:
- /*
- * elide CMP $0,x if calculation of x can set condition codes
- */
- if(p->from.type != D_CONST || p->from.offset != 0)
- continue;
- r2 = r->s1;
- if(r2 == R)
- continue;
- t = r2->prog->as;
- switch(t) {
- default:
- continue;
- case ABEQ:
- case ABNE:
- case ABMI:
- case ABPL:
- break;
- case ABGE:
- t = ABPL;
- break;
- case ABLT:
- t = ABMI;
- break;
- case ABHI:
- t = ABNE;
- break;
- case ABLS:
- t = ABEQ;
- break;
- }
- r1 = r;
- do
- r1 = uniqp(r1);
- while (r1 != R && r1->prog->as == ANOP);
- if(r1 == R)
- continue;
- p1 = r1->prog;
- if(p1->to.type != D_REG)
- continue;
- if(p1->to.reg != p->reg)
- if(!(p1->as == AMOVW && p1->from.type == D_REG && p1->from.reg == p->reg))
- continue;
- switch(p1->as) {
- default:
- continue;
- case AMOVW:
- if(p1->from.type != D_REG)
- continue;
- case AAND:
- case AEOR:
- case AORR:
- case ABIC:
- case AMVN:
- case ASUB:
- case ARSB:
- case AADD:
- case AADC:
- case ASBC:
- case ARSC:
- break;
- }
- p1->scond |= C_SBIT;
- r2->prog->as = t;
- excise(r);
- continue;
- }
- }
-
- predicate();
-}
-
-void
-excise(Reg *r)
-{
- Prog *p;
-
- p = r->prog;
- p->as = ANOP;
- p->scond = zprog.scond;
- p->from = zprog.from;
- p->to = zprog.to;
- p->reg = zprog.reg; /**/
-}
-
-Reg*
-uniqp(Reg *r)
-{
- Reg *r1;
-
- r1 = r->p1;
- if(r1 == R) {
- r1 = r->p2;
- if(r1 == R || r1->p2link != R)
- return R;
- } else
- if(r->p2 != R)
- return R;
- return r1;
-}
-
-Reg*
-uniqs(Reg *r)
-{
- Reg *r1;
-
- r1 = r->s1;
- if(r1 == R) {
- r1 = r->s2;
- if(r1 == R)
- return R;
- } else
- if(r->s2 != R)
- return R;
- return r1;
-}
-
-int
-regtyp(Addr *a)
-{
-
- if(a->type == D_REG)
- return 1;
- if(a->type == D_FREG)
- return 1;
- return 0;
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- * MOV a, R0
- * ADD b, R0 / no use of R1
- * MOV R0, R1
- * would be converted to
- * MOV a, R1
- * ADD b, R1
- * MOV R1, R0
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- */
-int
-subprop(Reg *r0)
-{
- Prog *p;
- Addr *v1, *v2;
- Reg *r;
- int t;
-
- p = r0->prog;
- v1 = &p->from;
- if(!regtyp(v1))
- return 0;
- v2 = &p->to;
- if(!regtyp(v2))
- return 0;
- for(r=uniqp(r0); r!=R; r=uniqp(r)) {
- if(uniqs(r) == R)
- break;
- p = r->prog;
- switch(p->as) {
- case ABL:
- return 0;
-
- case ACMP:
- case ACMN:
- case AADD:
- case ASUB:
- case ARSB:
- case ASLL:
- case ASRL:
- case ASRA:
- case AORR:
- case AAND:
- case AEOR:
- case AMUL:
- case ADIV:
- case ADIVU:
-
- case ACMPF:
- case ACMPD:
- case AADDD:
- case AADDF:
- case ASUBD:
- case ASUBF:
- case AMULD:
- case AMULF:
- case ADIVD:
- case ADIVF:
- if(p->to.type == v1->type)
- if(p->to.reg == v1->reg) {
- if(p->reg == NREG)
- p->reg = p->to.reg;
- goto gotit;
- }
- break;
-
- case AMOVF:
- case AMOVD:
- case AMOVW:
- if(p->to.type == v1->type)
- if(p->to.reg == v1->reg)
- goto gotit;
- break;
-
- case AMOVM:
- t = 1<<v2->reg;
- if((p->from.type == D_CONST && (p->from.offset&t)) ||
- (p->to.type == D_CONST && (p->to.offset&t)))
- return 0;
- break;
- }
- if(copyau(&p->from, v2) ||
- copyau1(p, v2) ||
- copyau(&p->to, v2))
- break;
- if(copysub(&p->from, v1, v2, 0) ||
- copysub1(p, v1, v2, 0) ||
- copysub(&p->to, v1, v2, 0))
- break;
- }
- return 0;
-
-gotit:
- copysub(&p->to, v1, v2, 1);
- if(debug['P']) {
- print("gotit: %D->%D\n%P", v1, v2, r->prog);
- if(p->from.type == v2->type)
- print(" excise");
- print("\n");
- }
- for(r=uniqs(r); r!=r0; r=uniqs(r)) {
- p = r->prog;
- copysub(&p->from, v1, v2, 1);
- copysub1(p, v1, v2, 1);
- copysub(&p->to, v1, v2, 1);
- if(debug['P'])
- print("%P\n", r->prog);
- }
- t = v1->reg;
- v1->reg = v2->reg;
- v2->reg = t;
- if(debug['P'])
- print("%P last\n", r->prog);
- return 1;
-}
-
-/*
- * The idea is to remove redundant copies.
- * v1->v2 F=0
- * (use v2 s/v2/v1/)*
- * set v1 F=1
- * use v2 return fail
- * -----------------
- * v1->v2 F=0
- * (use v2 s/v2/v1/)*
- * set v1 F=1
- * set v2 return success
- */
-int
-copyprop(Reg *r0)
-{
- Prog *p;
- Addr *v1, *v2;
- Reg *r;
-
- p = r0->prog;
- v1 = &p->from;
- v2 = &p->to;
- if(copyas(v1, v2))
- return 1;
- for(r=firstr; r!=R; r=r->link)
- r->active = 0;
- return copy1(v1, v2, r0->s1, 0);
-}
-
-int
-copy1(Addr *v1, Addr *v2, Reg *r, int f)
-{
- int t;
- Prog *p;
-
- if(r->active) {
- if(debug['P'])
- print("act set; return 1\n");
- return 1;
- }
- r->active = 1;
- if(debug['P'])
- print("copy %D->%D f=%d\n", v1, v2, f);
- for(; r != R; r = r->s1) {
- p = r->prog;
- if(debug['P'])
- print("%P", p);
- if(!f && uniqp(r) == R) {
- f = 1;
- if(debug['P'])
- print("; merge; f=%d", f);
- }
- t = copyu(p, v2, A);
- switch(t) {
- case 2: /* rar, can't split */
- if(debug['P'])
- print("; %Drar; return 0\n", v2);
- return 0;
-
- case 3: /* set */
- if(debug['P'])
- print("; %Dset; return 1\n", v2);
- return 1;
-
- case 1: /* used, substitute */
- case 4: /* use and set */
- if(f) {
- if(!debug['P'])
- return 0;
- if(t == 4)
- print("; %Dused+set and f=%d; return 0\n", v2, f);
- else
- print("; %Dused and f=%d; return 0\n", v2, f);
- return 0;
- }
- if(copyu(p, v2, v1)) {
- if(debug['P'])
- print("; sub fail; return 0\n");
- return 0;
- }
- if(debug['P'])
- print("; sub%D/%D", v2, v1);
- if(t == 4) {
- if(debug['P'])
- print("; %Dused+set; return 1\n", v2);
- return 1;
- }
- break;
- }
- if(!f) {
- t = copyu(p, v1, A);
- if(!f && (t == 2 || t == 3 || t == 4)) {
- f = 1;
- if(debug['P'])
- print("; %Dset and !f; f=%d", v1, f);
- }
- }
- if(debug['P'])
- print("\n");
- if(r->s2)
- if(!copy1(v1, v2, r->s2, f))
- return 0;
- }
- return 1;
-}
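copyprop and copy1 above follow the flow graph forward from a register-to-register move, rewriting later uses of the destination back to the source until either register is redefined, and failing outright on the harder cases reported by copyu. A much-reduced sketch of the same idea on straight-line code, using a hypothetical three-field instruction record rather than the real Prog/Reg structures:

#include <stdio.h>

typedef struct { char op; int dst, src; } Ins;	/* '=' copy, 'u' use src, 'k' set dst */

/* After the copy at index i, rewrite later uses of dst to src until either side is set again. */
static void copyprop1(Ins *prog, int n, int i)
{
	int v1 = prog[i].src, v2 = prog[i].dst, j;

	for(j = i+1; j < n; j++) {
		if(prog[j].op == 'u' && prog[j].src == v2)
			prog[j].src = v1;	/* use of v2 becomes use of v1 */
		if(prog[j].dst == v1 || prog[j].dst == v2)
			return;			/* either register redefined: stop */
	}
}

int main(void)
{
	Ins prog[] = {
		{'=', 2, 1},	/* r2 = r1 */
		{'u', 0, 2},	/* use r2: rewritten to r1 */
		{'k', 1, 0},	/* r1 = ...: propagation must stop */
		{'u', 0, 2},	/* use r2: left alone */
	};
	int i, n = sizeof prog / sizeof prog[0];

	copyprop1(prog, n, 0);
	for(i = 0; i < n; i++)
		printf("%c dst=r%d src=r%d\n", prog[i].op, prog[i].dst, prog[i].src);
	return 0;
}

The real pass also walks joins and splits and handles the read-alter-rewrite and use-and-set answers from copyu, none of which this toy attempts.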
-
-/*
- * The idea is to remove redundant constants.
- * $c1->v1
- * ($c1->v2 s/$c1/v1)*
- * set v1 return
- * The v1->v2 should be eliminated by copy propagation.
- */
-void
-constprop(Addr *c1, Addr *v1, Reg *r)
-{
- Prog *p;
-
- if(debug['C'])
- print("constprop %D->%D\n", c1, v1);
- for(; r != R; r = r->s1) {
- p = r->prog;
- if(debug['C'])
- print("%P", p);
- if(uniqp(r) == R) {
- if(debug['C'])
- print("; merge; return\n");
- return;
- }
- if(p->as == AMOVW && copyas(&p->from, c1)) {
- if(debug['C'])
- print("; sub%D/%D", &p->from, v1);
- p->from = *v1;
- } else if(copyu(p, v1, A) > 1) {
- if(debug['C'])
- print("; %Dset; return\n", v1);
- return;
- }
- if(debug['C'])
- print("\n");
- if(r->s2)
- constprop(c1, v1, r->s2);
- }
-}
-
-/*
- * ASLL x,y,w
- * .. (not use w, not set x y w)
- * AXXX w,a,b (a != w)
- * .. (not use w)
- * (set w)
- * ----------- changed to
- * ..
- * AXXX (x<<y),a,b
- * ..
- */
-#define FAIL(msg) { if(debug['H']) print("\t%s; FAILURE\n", msg); return 0; }
-int
-shiftprop(Reg *r)
-{
- Reg *r1;
- Prog *p, *p1, *p2;
- int n, o;
- Addr a;
-
- p = r->prog;
- if(p->to.type != D_REG)
- FAIL("BOTCH: result not reg");
- n = p->to.reg;
- a = zprog.from;
- if(p->reg != NREG && p->reg != p->to.reg) {
- a.type = D_REG;
- a.reg = p->reg;
- }
- if(debug['H'])
- print("shiftprop\n%P", p);
- r1 = r;
- for(;;) {
- /* find first use of shift result; abort if shift operands or result are changed */
- r1 = uniqs(r1);
- if(r1 == R)
- FAIL("branch");
- if(uniqp(r1) == R)
- FAIL("merge");
- p1 = r1->prog;
- if(debug['H'])
- print("\n%P", p1);
- switch(copyu(p1, &p->to, A)) {
- case 0: /* not used or set */
- if((p->from.type == D_REG && copyu(p1, &p->from, A) > 1) ||
- (a.type == D_REG && copyu(p1, &a, A) > 1))
- FAIL("args modified");
- continue;
- case 3: /* set, not used */
- FAIL("BOTCH: noref");
- }
- break;
- }
- /* check whether substitution can be done */
- switch(p1->as) {
- default:
- FAIL("non-dpi");
- case AAND:
- case AEOR:
- case AADD:
- case AADC:
- case AORR:
- case ASUB:
- case ARSB:
- case ASBC:
- case ARSC:
- if(p1->reg == n || (p1->reg == NREG && p1->to.type == D_REG && p1->to.reg == n)) {
- if(p1->from.type != D_REG)
- FAIL("can't swap");
- p1->reg = p1->from.reg;
- p1->from.reg = n;
- switch(p1->as) {
- case ASUB:
- p1->as = ARSB;
- break;
- case ARSB:
- p1->as = ASUB;
- break;
- case ASBC:
- p1->as = ARSC;
- break;
- case ARSC:
- p1->as = ASBC;
- break;
- }
- if(debug['H'])
- print("\t=>%P", p1);
- }
- case ABIC:
- case ACMP:
- case ACMN:
- if(p1->reg == n)
- FAIL("can't swap");
- if(p1->reg == NREG && p1->to.reg == n)
- FAIL("shift result used twice");
- case AMVN:
- if(p1->from.type == D_SHIFT)
- FAIL("shift result used in shift");
- if(p1->from.type != D_REG || p1->from.reg != n)
- FAIL("BOTCH: where is it used?");
- break;
- }
- /* check whether shift result is used subsequently */
- p2 = p1;
- if(p1->to.reg != n)
- for (;;) {
- r1 = uniqs(r1);
- if(r1 == R)
- FAIL("inconclusive");
- p1 = r1->prog;
- if(debug['H'])
- print("\n%P", p1);
- switch(copyu(p1, &p->to, A)) {
- case 0: /* not used or set */
- continue;
- case 3: /* set, not used */
- break;
- default:/* used */
- FAIL("reused");
- }
- break;
- }
- /* make the substitution */
- p2->from.type = D_SHIFT;
- p2->from.reg = NREG;
- o = p->reg;
- if(o == NREG)
- o = p->to.reg;
- switch(p->from.type){
- case D_CONST:
- o |= (p->from.offset&0x1f)<<7;
- break;
- case D_REG:
- o |= (1<<4) | (p->from.reg<<8);
- break;
- }
- switch(p->as){
- case ASLL:
- o |= 0<<5;
- break;
- case ASRL:
- o |= 1<<5;
- break;
- case ASRA:
- o |= 2<<5;
- break;
- }
- p2->from.offset = o;
- if(debug['H'])
- print("\t=>%P\tSUCCEED\n", p2);
- return 1;
-}
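The tail of shiftprop above packs the shifted-register operand into a single word: bits 0-3 hold the base register, bit 4 is set when the shift count lives in a register, bits 5-6 select the shift type (0 LSL, 1 LSR, 2 ASR), and the count goes in bits 7-11 (immediate) or bits 8-11 (register). A small pack/decode sketch of that layout, read off the code above rather than any header:

#include <stdio.h>
#include <stdint.h>

/* Pack a shifted-register operand the way shiftprop builds p2->from.offset. */
static uint32_t packshift(int basereg, int type, int count, int countisreg)
{
	uint32_t o = basereg & 0xf;

	o |= (uint32_t)(type & 3) << 5;
	if(countisreg)
		o |= (1u << 4) | ((uint32_t)(count & 0xf) << 8);
	else
		o |= (uint32_t)(count & 0x1f) << 7;
	return o;
}

int main(void)
{
	uint32_t o = packshift(3, 0, 2, 0);	/* R3 LSL #2 */

	printf("operand=%#x base=R%u type=%u count=#%u\n",
		o, o & 0xf, (o >> 5) & 3, (o >> 7) & 0x1f);
	return 0;
}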
-
-Reg*
-findpre(Reg *r, Addr *v)
-{
- Reg *r1;
-
- for(r1=uniqp(r); r1!=R; r=r1,r1=uniqp(r)) {
- if(uniqs(r1) != r)
- return R;
- switch(copyu(r1->prog, v, A)) {
- case 1: /* used */
- case 2: /* read-alter-rewrite */
- return R;
- case 3: /* set */
- case 4: /* set and used */
- return r1;
- }
- }
- return R;
-}
-
-Reg*
-findinc(Reg *r, Reg *r2, Addr *v)
-{
- Reg *r1;
- Prog *p;
-
-
- for(r1=uniqs(r); r1!=R && r1!=r2; r=r1,r1=uniqs(r)) {
- if(uniqp(r1) != r)
- return R;
- switch(copyu(r1->prog, v, A)) {
- case 0: /* not touched */
- continue;
- case 4: /* set and used */
- p = r1->prog;
- if(p->as == AADD)
- if(p->from.type == D_CONST)
- if(p->from.offset > -4096 && p->from.offset < 4096)
- return r1;
- default:
- return R;
- }
- }
- return R;
-}
-
-int
-nochange(Reg *r, Reg *r2, Prog *p)
-{
- Addr a[3];
- int i, n;
-
- if(r == r2)
- return 1;
- n = 0;
- if(p->reg != NREG && p->reg != p->to.reg) {
- a[n].type = D_REG;
- a[n++].reg = p->reg;
- }
- switch(p->from.type) {
- case D_SHIFT:
- a[n].type = D_REG;
- a[n++].reg = p->from.offset&0xf;
- case D_REG:
- a[n].type = D_REG;
- a[n++].reg = p->from.reg;
- }
- if(n == 0)
- return 1;
- for(; r!=R && r!=r2; r=uniqs(r)) {
- p = r->prog;
- for(i=0; i<n; i++)
- if(copyu(p, &a[i], A) > 1)
- return 0;
- }
- return 1;
-}
-
-int
-findu1(Reg *r, Addr *v)
-{
- for(; r != R; r = r->s1) {
- if(r->active)
- return 0;
- r->active = 1;
- switch(copyu(r->prog, v, A)) {
- case 1: /* used */
- case 2: /* read-alter-rewrite */
- case 4: /* set and used */
- return 1;
- case 3: /* set */
- return 0;
- }
- if(r->s2)
- if (findu1(r->s2, v))
- return 1;
- }
- return 0;
-}
-
-int
-finduse(Reg *r, Addr *v)
-{
- Reg *r1;
-
- for(r1=firstr; r1!=R; r1=r1->link)
- r1->active = 0;
- return findu1(r, v);
-}
-
-int
-xtramodes(Reg *r, Addr *a)
-{
- Reg *r1, *r2, *r3;
- Prog *p, *p1;
- Addr v;
-
- p = r->prog;
- if((p->as == AMOVB || p->as == AMOVBS) && p->from.type == D_OREG) /* byte load */
- return 0;
- v = *a;
- v.type = D_REG;
- r1 = findpre(r, &v);
- if(r1 != R) {
- p1 = r1->prog;
- if(p1->to.type == D_REG && p1->to.reg == v.reg)
- switch(p1->as) {
- case AADD:
- if(p1->from.type == D_REG ||
- (p1->from.type == D_SHIFT && (p1->from.offset&(1<<4)) == 0 &&
- ((p->as != AMOVB && p->as != AMOVBS) || (a == &p->from && (p1->from.offset&~0xf) == 0))) ||
- (p1->from.type == D_CONST &&
- p1->from.offset > -4096 && p1->from.offset < 4096))
- if(nochange(uniqs(r1), r, p1)) {
- if(a != &p->from || v.reg != p->to.reg)
- if (finduse(r->s1, &v)) {
- if(p1->reg == NREG || p1->reg == v.reg)
- /* pre-indexing */
- p->scond |= C_WBIT;
- else return 0;
- }
- switch (p1->from.type) {
- case D_REG:
- /* register offset */
- if(nacl)
- return 0;
- a->type = D_SHIFT;
- a->offset = p1->from.reg;
- break;
- case D_SHIFT:
- /* scaled register offset */
- if(nacl)
- return 0;
- a->type = D_SHIFT;
- case D_CONST:
- /* immediate offset */
- a->offset = p1->from.offset;
- break;
- }
- if(p1->reg != NREG)
- a->reg = p1->reg;
- excise(r1);
- return 1;
- }
- break;
- case AMOVW:
- if(p1->from.type == D_REG)
- if((r2 = findinc(r1, r, &p1->from)) != R) {
- for(r3=uniqs(r2); r3->prog->as==ANOP; r3=uniqs(r3))
- ;
- if(r3 == r) {
- /* post-indexing */
- p1 = r2->prog;
- a->reg = p1->to.reg;
- a->offset = p1->from.offset;
- p->scond |= C_PBIT;
- if(!finduse(r, &r1->prog->to))
- excise(r1);
- excise(r2);
- return 1;
- }
- }
- break;
- }
- }
- if(a != &p->from || a->reg != p->to.reg)
- if((r1 = findinc(r, R, &v)) != R) {
- /* post-indexing */
- p1 = r1->prog;
- a->offset = p1->from.offset;
- p->scond |= C_PBIT;
- excise(r1);
- return 1;
- }
- return 0;
-}
-
-/*
- * return
- * 1 if v only used (and substitute),
- * 2 if read-alter-rewrite
- * 3 if set
- * 4 if set and used
- * 0 otherwise (not touched)
- */
-int
-copyu(Prog *p, Addr *v, Addr *s)
-{
-
- switch(p->as) {
-
- default:
- if(debug['P'])
- print(" (?)");
- return 2;
-
- case AMOVM:
- if(v->type != D_REG)
- return 0;
- if(p->from.type == D_CONST) { /* read reglist, read/rar */
- if(s != A) {
- if(p->from.offset&(1<<v->reg))
- return 1;
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v)) {
- if(p->scond&C_WBIT)
- return 2;
- return 1;
- }
- if(p->from.offset&(1<<v->reg))
- return 1;
- } else { /* read/rar, write reglist */
- if(s != A) {
- if(p->to.offset&(1<<v->reg))
- return 1;
- if(copysub(&p->from, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->from, v)) {
- if(p->scond&C_WBIT)
- return 2;
- if(p->to.offset&(1<<v->reg))
- return 4;
- return 1;
- }
- if(p->to.offset&(1<<v->reg))
- return 3;
- }
- return 0;
-
- case ANOP: /* read, write */
- case AMOVW:
- case AMOVF:
- case AMOVD:
- case AMOVH:
- case AMOVHS:
- case AMOVHU:
- case AMOVB:
- case AMOVBS:
- case AMOVBU:
- case AMOVDW:
- case AMOVWD:
- case AMOVFD:
- case AMOVDF:
- if(p->scond&(C_WBIT|C_PBIT))
- if(v->type == D_REG) {
- if(p->from.type == D_OREG || p->from.type == D_SHIFT) {
- if(p->from.reg == v->reg)
- return 2;
- } else {
- if(p->to.reg == v->reg)
- return 2;
- }
- }
- if(s != A) {
- if(copysub(&p->from, v, s, 1))
- return 1;
- if(!copyas(&p->to, v))
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyas(&p->to, v)) {
- if(copyau(&p->from, v))
- return 4;
- return 3;
- }
- if(copyau(&p->from, v))
- return 1;
- if(copyau(&p->to, v))
- return 1;
- return 0;
-
-
- case AADD: /* read, read, write */
- case ASUB:
- case ARSB:
- case ASLL:
- case ASRL:
- case ASRA:
- case AORR:
- case AAND:
- case AEOR:
- case AMUL:
- case ADIV:
- case ADIVU:
- case AADDF:
- case AADDD:
- case ASUBF:
- case ASUBD:
- case AMULF:
- case AMULD:
- case ADIVF:
- case ADIVD:
-
- case ACMPF:
- case ACMPD:
- case ACMP:
- case ACMN:
- case ACASE:
- if(s != A) {
- if(copysub(&p->from, v, s, 1))
- return 1;
- if(copysub1(p, v, s, 1))
- return 1;
- if(!copyas(&p->to, v))
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyas(&p->to, v)) {
- if(p->reg == NREG)
- p->reg = p->to.reg;
- if(copyau(&p->from, v))
- return 4;
- if(copyau1(p, v))
- return 4;
- return 3;
- }
- if(copyau(&p->from, v))
- return 1;
- if(copyau1(p, v))
- return 1;
- if(copyau(&p->to, v))
- return 1;
- return 0;
-
- case ABEQ: /* read, read */
- case ABNE:
- case ABCS:
- case ABHS:
- case ABCC:
- case ABLO:
- case ABMI:
- case ABPL:
- case ABVS:
- case ABVC:
- case ABHI:
- case ABLS:
- case ABGE:
- case ABLT:
- case ABGT:
- case ABLE:
- case APLD:
- if(s != A) {
- if(copysub(&p->from, v, s, 1))
- return 1;
- return copysub1(p, v, s, 1);
- }
- if(copyau(&p->from, v))
- return 1;
- if(copyau1(p, v))
- return 1;
- return 0;
-
- case AB: /* funny */
- if(s != A) {
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v))
- return 1;
- return 0;
-
- case ARET: /* funny */
- if(v->type == D_REG)
- if(v->reg == REGRET)
- return 2;
- if(v->type == D_FREG)
- if(v->reg == FREGRET)
- return 2;
-
- case ABL: /* funny */
- if(v->type == D_REG) {
- if(v->reg <= REGEXT && v->reg > exregoffset)
- return 2;
- if(v->reg == REGARG)
- return 2;
- }
- if(v->type == D_FREG)
- if(v->reg <= FREGEXT && v->reg > exfregoffset)
- return 2;
-
- if(s != A) {
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v))
- return 4;
- return 3;
-
- case ATEXT: /* funny */
- if(v->type == D_REG)
- if(v->reg == REGARG)
- return 3;
- return 0;
- }
-}
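The 0/1/2/3/4 codes returned by copyu drive every pass in this file. One simplified forward-scan shape, roughly in the spirit of copy1 and constprop (the real passes differ in detail — copy1, for instance, can still substitute into a use-and-set instruction before stopping):

#include <stdio.h>

/* The five answers copyu can give, per the comment above the function. */
enum { Cnone = 0, Cuse = 1, Crar = 2, Cset = 3, Cuseset = 4 };

static const char *scan(const int *code, int n)
{
	int i;

	for(i = 0; i < n; i++) {
		switch(code[i]) {
		case Cnone:
		case Cuse:
			continue;		/* untouched or merely used: keep scanning */
		case Cset:
			return "value redefined: propagation ends here";
		case Crar:
		case Cuseset:
			return "hard case: give up";
		}
	}
	return "reached end of block";
}

int main(void)
{
	int trace[] = { Cnone, Cuse, Cuse, Cset, Cuse };

	printf("%s\n", scan(trace, 5));
	return 0;
}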
-
-int
-a2type(Prog *p)
-{
-
- switch(p->as) {
-
- case ACMP:
- case ACMN:
-
- case AADD:
- case ASUB:
- case ARSB:
- case ASLL:
- case ASRL:
- case ASRA:
- case AORR:
- case AAND:
- case AEOR:
- case AMUL:
- case ADIV:
- case ADIVU:
- return D_REG;
-
- case ACMPF:
- case ACMPD:
-
- case AADDF:
- case AADDD:
- case ASUBF:
- case ASUBD:
- case AMULF:
- case AMULD:
- case ADIVF:
- case ADIVD:
- return D_FREG;
- }
- return D_NONE;
-}
-
-/*
- * direct reference,
- * could be set/use depending on
- * semantics
- */
-int
-copyas(Addr *a, Addr *v)
-{
-
- if(regtyp(v)) {
- if(a->type == v->type)
- if(a->reg == v->reg)
- return 1;
- } else if(v->type == D_CONST) { /* for constprop */
- if(a->type == v->type)
- if(a->name == v->name)
- if(a->sym == v->sym)
- if(a->reg == v->reg)
- if(a->offset == v->offset)
- return 1;
- }
- return 0;
-}
-
-/*
- * either direct or indirect
- */
-int
-copyau(Addr *a, Addr *v)
-{
-
- if(copyas(a, v))
- return 1;
- if(v->type == D_REG) {
- if(a->type == D_OREG) {
- if(v->reg == a->reg)
- return 1;
- } else if(a->type == D_SHIFT) {
- if((a->offset&0xf) == v->reg)
- return 1;
- if((a->offset&(1<<4)) && (a->offset>>8) == v->reg)
- return 1;
- }
- }
- return 0;
-}
-
-int
-copyau1(Prog *p, Addr *v)
-{
-
- if(regtyp(v)) {
- if(a2type(p) == v->type)
- if(p->reg == v->reg) {
- if(a2type(p) != v->type)
- print("botch a2type %P\n", p);
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * substitute s for v in a
- * return failure to substitute
- */
-int
-copysub(Addr *a, Addr *v, Addr *s, int f)
-{
-
- if(f)
- if(copyau(a, v)) {
- if(a->type == D_SHIFT) {
- if((a->offset&0xf) == v->reg)
- a->offset = (a->offset&~0xf)|s->reg;
- if((a->offset&(1<<4)) && (a->offset>>8) == v->reg)
- a->offset = (a->offset&~(0xf<<8))|(s->reg<<8);
- } else
- a->reg = s->reg;
- }
- return 0;
-}
-
-int
-copysub1(Prog *p1, Addr *v, Addr *s, int f)
-{
-
- if(f)
- if(copyau1(p1, v))
- p1->reg = s->reg;
- return 0;
-}
-
-struct {
- int opcode;
- int notopcode;
- int scond;
- int notscond;
-} predinfo[] = {
- { ABEQ, ABNE, 0x0, 0x1, },
- { ABNE, ABEQ, 0x1, 0x0, },
- { ABCS, ABCC, 0x2, 0x3, },
- { ABHS, ABLO, 0x2, 0x3, },
- { ABCC, ABCS, 0x3, 0x2, },
- { ABLO, ABHS, 0x3, 0x2, },
- { ABMI, ABPL, 0x4, 0x5, },
- { ABPL, ABMI, 0x5, 0x4, },
- { ABVS, ABVC, 0x6, 0x7, },
- { ABVC, ABVS, 0x7, 0x6, },
- { ABHI, ABLS, 0x8, 0x9, },
- { ABLS, ABHI, 0x9, 0x8, },
- { ABGE, ABLT, 0xA, 0xB, },
- { ABLT, ABGE, 0xB, 0xA, },
- { ABGT, ABLE, 0xC, 0xD, },
- { ABLE, ABGT, 0xD, 0xC, },
-};
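Every scond/notscond pair in the predinfo table above differs only in the low bit (EQ 0x0 / NE 0x1, CS 0x2 / CC 0x3, ... GT 0xC / LE 0xD), so the inverse of an ARM condition code in this range is simply the code with bit 0 flipped. A tiny check of that observation:

#include <stdio.h>

/* ARM condition-code mnemonics in encoding order, as used in predinfo. */
static const char *condname[16] = {
	"EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC",
	"HI", "LS", "GE", "LT", "GT", "LE", "AL", "NV",
};

int main(void)
{
	int c;

	for(c = 0; c <= 0xD; c++)
		printf("%s <-> %s\n", condname[c], condname[c ^ 1]);
	return 0;
}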
-
-typedef struct {
- Reg *start;
- Reg *last;
- Reg *end;
- int len;
-} Joininfo;
-
-enum {
- Join,
- Split,
- End,
- Branch,
- Setcond,
- Toolong
-};
-
-enum {
- Falsecond,
- Truecond,
- Delbranch,
- Keepbranch
-};
-
-int
-isbranch(Prog *p)
-{
- return (ABEQ <= p->as) && (p->as <= ABLE);
-}
-
-int
-predicable(Prog *p)
-{
- if (isbranch(p)
- || p->as == ANOP
- || p->as == AXXX
- || p->as == ADATA
- || p->as == AGLOBL
- || p->as == AGOK
- || p->as == AHISTORY
- || p->as == ANAME
- || p->as == ASIGNAME
- || p->as == ATEXT
- || p->as == AWORD
- || p->as == ABCASE
- || p->as == ACASE)
- return 0;
- return 1;
-}
-
-/*
- * Depends on an analysis of the encodings performed by 5l.
- * These seem to be all of the opcodes that lead to the "S" bit
- * being set in the instruction encodings.
- *
- * C_SBIT may also have been set explicitly in p->scond.
- */
-int
-modifiescpsr(Prog *p)
-{
- return (p->scond&C_SBIT)
- || p->as == ATST
- || p->as == ATEQ
- || p->as == ACMN
- || p->as == ACMP
- || p->as == AMULU
- || p->as == ADIVU
- || p->as == AMUL
- || p->as == ADIV
- || p->as == AMOD
- || p->as == AMODU
- || p->as == ABL;
-}
-
-/*
- * Find the maximal chain of instructions starting with r which could
- * be executed conditionally
- */
-int
-joinsplit(Reg *r, Joininfo *j)
-{
- j->start = r;
- j->last = r;
- j->len = 0;
- do {
- if (r->p2 && (r->p1 || r->p2->p2link)) {
- j->end = r;
- return Join;
- }
- if (r->s1 && r->s2) {
- j->end = r;
- return Split;
- }
- j->last = r;
- if (r->prog->as != ANOP)
- j->len++;
- if (!r->s1 && !r->s2) {
- j->end = r->link;
- return End;
- }
- if (r->s2) {
- j->end = r->s2;
- return Branch;
- }
- if (modifiescpsr(r->prog)) {
- j->end = r->s1;
- return Setcond;
- }
- r = r->s1;
- } while (j->len < 4);
- j->end = r;
- return Toolong;
-}
-
-Reg *
-successor(Reg *r)
-{
- if (r->s1)
- return r->s1;
- else
- return r->s2;
-}
-
-void
-applypred(Reg *rstart, Joininfo *j, int cond, int branch)
-{
- int pred;
- Reg *r;
-
- if(j->len == 0)
- return;
- if (cond == Truecond)
- pred = predinfo[rstart->prog->as - ABEQ].scond;
- else
- pred = predinfo[rstart->prog->as - ABEQ].notscond;
-
- for (r = j->start; ; r = successor(r)) {
- if (r->prog->as == AB) {
- if (r != j->last || branch == Delbranch)
- excise(r);
- else {
- if (cond == Truecond)
- r->prog->as = predinfo[rstart->prog->as - ABEQ].opcode;
- else
- r->prog->as = predinfo[rstart->prog->as - ABEQ].notopcode;
- }
- }
- else if (predicable(r->prog))
- r->prog->scond = (r->prog->scond&~C_SCOND)|pred;
- if (r->s1 != r->link) {
- r->s1 = r->link;
- r->link->p1 = r;
- }
- if (r == j->last)
- break;
- }
-}
-
-void
-predicate(void)
-{
- Reg *r;
- int t1, t2;
- Joininfo j1, j2;
-
- for(r=firstr; r!=R; r=r->link) {
- if (isbranch(r->prog)) {
- t1 = joinsplit(r->s1, &j1);
- t2 = joinsplit(r->s2, &j2);
- if(j1.last->link != j2.start)
- continue;
- if(j1.end == j2.end)
- if((t1 == Branch && (t2 == Join || t2 == Setcond)) ||
- (t2 == Join && (t1 == Join || t1 == Setcond))) {
- applypred(r, &j1, Falsecond, Delbranch);
- applypred(r, &j2, Truecond, Delbranch);
- excise(r);
- continue;
- }
- if(t1 == End || t1 == Branch) {
- applypred(r, &j1, Falsecond, Keepbranch);
- excise(r);
- continue;
- }
- }
- }
-}
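predicate() above looks for short branch-over sequences (joinsplit caps them at four real instructions) and rewrites them as conditionally executed ARM instructions, deleting the branch. As a minimal sketch of what conditional execution buys — a toy interpreter over predicated moves with made-up names, not the Prog/Reg machinery:

#include <stdio.h>

enum { ALWAYS, IFEQ, IFNE };	/* a tiny condition field standing in for scond */

typedef struct { int cond; int dst; int val; } Pins;	/* "MOVcond $val, Rdst" */

/* Execute predicated moves against a zero flag, as the hardware would. */
static void run(const Pins *p, int n, int zflag, int *reg)
{
	int i;

	for(i = 0; i < n; i++) {
		if(p[i].cond == IFEQ && !zflag)
			continue;
		if(p[i].cond == IFNE && zflag)
			continue;
		reg[p[i].dst] = p[i].val;
	}
}

int main(void)
{
	/* if(x == 0) y = 1; else y = 2;  with the branches if-converted away */
	Pins prog[] = { {IFEQ, 0, 1}, {IFNE, 0, 2} };
	int reg[1];

	run(prog, 2, 1, reg);	/* x == 0: Z flag set */
	printf("y = %d\n", reg[0]);
	run(prog, 2, 0, reg);	/* x != 0 */
	printf("y = %d\n", reg[0]);
	return 0;
}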
diff --git a/src/cmd/5c/reg.c b/src/cmd/5c/reg.c
deleted file mode 100644
index 9024d5f49..000000000
--- a/src/cmd/5c/reg.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-// Inferno utils/5c/reg.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/reg.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-
-#include "gc.h"
-
-void addsplits(void);
-
-Reg*
-rega(void)
-{
- Reg *r;
-
- r = freer;
- if(r == R) {
- r = alloc(sizeof(*r));
- } else
- freer = r->link;
-
- *r = zreg;
- return r;
-}
-
-int
-rcmp(const void *a1, const void *a2)
-{
- Rgn *p1, *p2;
- int c1, c2;
-
- p1 = (Rgn*)a1;
- p2 = (Rgn*)a2;
- c1 = p2->cost;
- c2 = p1->cost;
- if(c1 -= c2)
- return c1;
- return p2->varno - p1->varno;
-}
-
-void
-regopt(Prog *p)
-{
- Reg *r, *r1, *r2;
- Prog *p1;
- int i, z;
- int32 initpc, val, npc;
- uint32 vreg;
- Bits bit;
- struct
- {
- int32 m;
- int32 c;
- Reg* p;
- } log5[6], *lp;
-
- firstr = R;
- lastr = R;
- nvar = 0;
- regbits = 0;
- for(z=0; z<BITS; z++) {
- externs.b[z] = 0;
- params.b[z] = 0;
- consts.b[z] = 0;
- addrs.b[z] = 0;
- }
-
- /*
- * pass 1
- * build aux data structure
- * allocate pcs
- * find use and set of variables
- */
- val = 5L * 5L * 5L * 5L * 5L;
- lp = log5;
- for(i=0; i<5; i++) {
- lp->m = val;
- lp->c = 0;
- lp->p = R;
- val /= 5L;
- lp++;
- }
- val = 0;
- for(; p != P; p = p->link) {
- switch(p->as) {
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- case AFUNCDATA:
- continue;
- }
- r = rega();
- if(firstr == R) {
- firstr = r;
- lastr = r;
- } else {
- lastr->link = r;
- r->p1 = lastr;
- lastr->s1 = r;
- lastr = r;
- }
- r->prog = p;
- r->pc = val;
- val++;
-
- lp = log5;
- for(i=0; i<5; i++) {
- lp->c--;
- if(lp->c <= 0) {
- lp->c = lp->m;
- if(lp->p != R)
- lp->p->log5 = r;
- lp->p = r;
- (lp+1)->c = 0;
- break;
- }
- lp++;
- }
-
- r1 = r->p1;
- if(r1 != R)
- switch(r1->prog->as) {
- case ARET:
- case AB:
- case ARFE:
- r->p1 = R;
- r1->s1 = R;
- }
-
- /*
- * left side always read
- */
- bit = mkvar(&p->from, p->as==AMOVW);
- for(z=0; z<BITS; z++)
- r->use1.b[z] |= bit.b[z];
-
- /*
- * right side depends on opcode
- */
- bit = mkvar(&p->to, 0);
- if(bany(&bit))
- switch(p->as) {
- default:
- diag(Z, "reg: unknown asop: %A", p->as);
- break;
-
- /*
- * right side write
- */
- case ANOP:
- case AMOVB:
- case AMOVBS:
- case AMOVBU:
- case AMOVH:
- case AMOVHS:
- case AMOVHU:
- case AMOVW:
- case AMOVF:
- case AMOVD:
- for(z=0; z<BITS; z++)
- r->set.b[z] |= bit.b[z];
- break;
-
- /*
- * right side read
- */
- case APLD:
- for(z=0; z<BITS; z++)
- r->use2.b[z] |= bit.b[z];
- break;
-
- /*
- * funny
- */
- case ABL:
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- break;
- }
-
- /* the mod/div runtime routines smash R12 */
- switch(p->as) {
- case AMOD:
- case AMODU:
- case ADIV:
- case ADIVU:
- regbits |= RtoB(12);
- break;
- }
-
- if(p->as == AMOVM) {
- if(p->from.type == D_CONST)
- z = p->from.offset;
- else
- z = p->to.offset;
- for(i=0; z; i++) {
- if(z&1)
- regbits |= RtoB(i);
- z >>= 1;
- }
- }
- }
- if(firstr == R)
- return;
- initpc = pc - val;
- npc = val;
-
- /*
- * pass 2
- * turn branch references to pointers
- * build back pointers
- */
- for(r = firstr; r != R; r = r->link) {
- p = r->prog;
- if(p->to.type == D_BRANCH) {
- val = p->to.offset - initpc;
- r1 = firstr;
- while(r1 != R) {
- r2 = r1->log5;
- if(r2 != R && val >= r2->pc) {
- r1 = r2;
- continue;
- }
- if(r1->pc == val)
- break;
- r1 = r1->link;
- }
- if(r1 == R) {
- nearln = p->lineno;
- diag(Z, "ref not found\n%P", p);
- continue;
- }
- if(r1 == r) {
- nearln = p->lineno;
- diag(Z, "ref to self\n%P", p);
- continue;
- }
- r->s2 = r1;
- r->p2link = r1->p2;
- r1->p2 = r;
- }
- }
- if(debug['R']) {
- p = firstr->prog;
- print("\n%L %D\n", p->lineno, &p->from);
- }
-
- /*
- * pass 2.5
- * find looping structure
- */
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- change = 0;
- loopit(firstr, npc);
-
- /*
- * pass 3
- * iterate propagating usage
- * back until flow graph is complete
- */
-loop1:
- change = 0;
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- for(r = firstr; r != R; r = r->link)
- if(r->prog->as == ARET)
- prop(r, zbits, zbits);
-loop11:
- /* pick up unreachable code */
- i = 0;
- for(r = firstr; r != R; r = r1) {
- r1 = r->link;
- if(r1 && r1->active && !r->active) {
- prop(r, zbits, zbits);
- i = 1;
- }
- }
- if(i)
- goto loop11;
- if(change)
- goto loop1;
-
-
- /*
- * pass 4
- * iterate propagating register/variable synchrony
- * forward until graph is complete
- */
-loop2:
- change = 0;
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- synch(firstr, zbits);
- if(change)
- goto loop2;
-
- addsplits();
-
- if(debug['R'] && debug['v']) {
- print("\nprop structure:\n");
- for(r = firstr; r != R; r = r->link) {
- print("%d:%P", r->loop, r->prog);
- for(z=0; z<BITS; z++)
- bit.b[z] = r->set.b[z] |
- r->refahead.b[z] | r->calahead.b[z] |
- r->refbehind.b[z] | r->calbehind.b[z] |
- r->use1.b[z] | r->use2.b[z];
- if(bany(&bit)) {
- print("\t");
- if(bany(&r->use1))
- print(" u1=%B", r->use1);
- if(bany(&r->use2))
- print(" u2=%B", r->use2);
- if(bany(&r->set))
- print(" st=%B", r->set);
- if(bany(&r->refahead))
- print(" ra=%B", r->refahead);
- if(bany(&r->calahead))
- print(" ca=%B", r->calahead);
- if(bany(&r->refbehind))
- print(" rb=%B", r->refbehind);
- if(bany(&r->calbehind))
- print(" cb=%B", r->calbehind);
- }
- print("\n");
- }
- }
-
- /*
- * pass 5
- * isolate regions
- * calculate costs (paint1)
- */
- r = firstr;
- if(r) {
- for(z=0; z<BITS; z++)
- bit.b[z] = (r->refahead.b[z] | r->calahead.b[z]) &
- ~(externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z]);
- if(bany(&bit)) {
- nearln = r->prog->lineno;
- warn(Z, "used and not set: %B", bit);
- if(debug['R'] && !debug['w'])
- print("used and not set: %B\n", bit);
- }
- }
-
- for(r = firstr; r != R; r = r->link)
- r->act = zbits;
- rgp = region;
- nregion = 0;
- for(r = firstr; r != R; r = r->link) {
- for(z=0; z<BITS; z++)
- bit.b[z] = r->set.b[z] &
- ~(r->refahead.b[z] | r->calahead.b[z] | addrs.b[z]);
- if(bany(&bit)) {
- nearln = r->prog->lineno;
- warn(Z, "set and not used: %B", bit);
- if(debug['R'])
- print("set and not used: %B\n", bit);
- excise(r);
- }
- for(z=0; z<BITS; z++)
- bit.b[z] = LOAD(r) & ~(r->act.b[z] | addrs.b[z]);
- while(bany(&bit)) {
- i = bnum(bit);
- rgp->enter = r;
- rgp->varno = i;
- change = 0;
- if(debug['R'] && debug['v'])
- print("\n");
- paint1(r, i);
- bit.b[i/32] &= ~(1L<<(i%32));
- if(change <= 0) {
- if(debug['R'])
- print("%L $%d: %B\n",
- r->prog->lineno, change, blsh(i));
- continue;
- }
- rgp->cost = change;
- nregion++;
- if(nregion >= NRGN) {
- fatal(Z, "too many regions");
- goto brk;
- }
- rgp++;
- }
- }
-brk:
- qsort(region, nregion, sizeof(region[0]), rcmp);
-
- /*
- * pass 6
- * determine used registers (paint2)
- * replace code (paint3)
- */
- rgp = region;
- for(i=0; i<nregion; i++) {
- bit = blsh(rgp->varno);
- vreg = paint2(rgp->enter, rgp->varno);
- vreg = allreg(vreg, rgp);
- if(debug['R']) {
- if(rgp->regno >= NREG)
- print("%L $%d F%d: %B\n",
- rgp->enter->prog->lineno,
- rgp->cost,
- rgp->regno-NREG,
- bit);
- else
- print("%L $%d R%d: %B\n",
- rgp->enter->prog->lineno,
- rgp->cost,
- rgp->regno,
- bit);
- }
- if(rgp->regno != 0)
- paint3(rgp->enter, rgp->varno, vreg, rgp->regno);
- rgp++;
- }
- /*
- * pass 7
- * peep-hole on basic block
- */
- if(!debug['R'] || debug['P'])
- peep();
-
- /*
- * pass 8
- * recalculate pc
- */
- val = initpc;
- for(r = firstr; r != R; r = r1) {
- r->pc = val;
- p = r->prog;
- p1 = P;
- r1 = r->link;
- if(r1 != R)
- p1 = r1->prog;
- for(; p != p1; p = p->link) {
- switch(p->as) {
- default:
- val++;
- break;
-
- case ANOP:
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- case AFUNCDATA:
- break;
- }
- }
- }
- pc = val;
-
- /*
- * fix up branches
- */
- if(debug['R'])
- if(bany(&addrs))
- print("addrs: %B\n", addrs);
-
- r1 = 0; /* set */
- for(r = firstr; r != R; r = r->link) {
- p = r->prog;
- if(p->to.type == D_BRANCH) {
- p->to.offset = r->s2->pc;
- p->to.u.branch = r->s2->prog;
- }
- r1 = r;
- }
-
- /*
- * last pass
- * eliminate nops
- * free aux structures
- */
- for(p = firstr->prog; p != P; p = p->link){
- while(p->link && p->link->as == ANOP)
- p->link = p->link->link;
- }
- if(r1 != R) {
- r1->link = freer;
- freer = firstr;
- }
-}
-
-void
-addsplits(void)
-{
- Reg *r, *r1;
- int z, i;
- Bits bit;
-
- for(r = firstr; r != R; r = r->link) {
- if(r->loop > 1)
- continue;
- if(r->prog->as == ABL)
- continue;
- for(r1 = r->p2; r1 != R; r1 = r1->p2link) {
- if(r1->loop <= 1)
- continue;
- for(z=0; z<BITS; z++)
- bit.b[z] = r1->calbehind.b[z] &
- (r->refahead.b[z] | r->use1.b[z] | r->use2.b[z]) &
- ~(r->calahead.b[z] & addrs.b[z]);
- while(bany(&bit)) {
- i = bnum(bit);
- bit.b[i/32] &= ~(1L << (i%32));
- }
- }
- }
-}
-
-/*
- * add mov b,rn
- * just after r
- */
-void
-addmove(Reg *r, int bn, int rn, int f)
-{
- Prog *p, *p1;
- Addr *a;
- Var *v;
-
- p1 = alloc(sizeof(*p1));
- *p1 = zprog;
- p = r->prog;
-
- p1->link = p->link;
- p->link = p1;
- p1->lineno = p->lineno;
-
- v = var + bn;
-
- a = &p1->to;
- a->sym = v->sym;
- a->name = v->name;
- a->offset = v->offset;
- a->etype = v->etype;
- a->type = D_OREG;
- if(a->etype == TARRAY || a->sym == nil)
- a->type = D_CONST;
-
- p1->as = AMOVW;
- if(v->etype == TCHAR || v->etype == TUCHAR)
- p1->as = AMOVBS;
- if(v->etype == TSHORT || v->etype == TUSHORT)
- p1->as = AMOVHS;
- if(v->etype == TFLOAT)
- p1->as = AMOVF;
- if(v->etype == TDOUBLE)
- p1->as = AMOVD;
-
- p1->from.type = D_REG;
- p1->from.reg = rn;
- if(rn >= NREG) {
- p1->from.type = D_FREG;
- p1->from.reg = rn-NREG;
- }
- if(!f) {
- p1->from = *a;
- *a = zprog.from;
- a->type = D_REG;
- a->reg = rn;
- if(rn >= NREG) {
- a->type = D_FREG;
- a->reg = rn-NREG;
- }
- if(v->etype == TUCHAR)
- p1->as = AMOVBU;
- if(v->etype == TUSHORT)
- p1->as = AMOVHU;
- }
- if(debug['R'])
- print("%P\t.a%P\n", p, p1);
-}
-
-Bits
-mkvar(Addr *a, int docon)
-{
- Var *v;
- int i, t, n, et, z;
- int32 o;
- Bits bit;
- LSym *s;
-
- t = a->type;
- if(t == D_REG && a->reg != NREG)
- regbits |= RtoB(a->reg);
- if(t == D_FREG && a->reg != NREG)
- regbits |= FtoB(a->reg);
- s = a->sym;
- o = a->offset;
- et = a->etype;
- if(s == nil) {
- if(t != D_CONST || !docon || a->reg != NREG)
- goto none;
- et = TLONG;
- }
- if(t == D_CONST) {
- if(s == nil && sval(o))
- goto none;
- }
-
- n = a->name;
- v = var;
- for(i=0; i<nvar; i++) {
- if(s == v->sym)
- if(n == v->name)
- if(o == v->offset)
- goto out;
- v++;
- }
- if(s)
- if(s->name[0] == '.')
- goto none;
- if(nvar >= NVAR)
- fatal(Z, "variable not optimized: %s", s->name);
- i = nvar;
- nvar++;
- v = &var[i];
- v->sym = s;
- v->offset = o;
- v->etype = et;
- v->name = n;
- if(debug['R'])
- print("bit=%2d et=%2d %D\n", i, et, a);
-out:
- bit = blsh(i);
- if(n == D_EXTERN || n == D_STATIC)
- for(z=0; z<BITS; z++)
- externs.b[z] |= bit.b[z];
- if(n == D_PARAM)
- for(z=0; z<BITS; z++)
- params.b[z] |= bit.b[z];
- if(v->etype != et || !typechlpfd[et]) /* funny punning */
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- if(t == D_CONST) {
- if(s == nil) {
- for(z=0; z<BITS; z++)
- consts.b[z] |= bit.b[z];
- return bit;
- }
- if(et != TARRAY)
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- for(z=0; z<BITS; z++)
- params.b[z] |= bit.b[z];
- return bit;
- }
- if(t == D_OREG)
- return bit;
-
-none:
- return zbits;
-}
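mkvar above assigns each distinct symbol/offset a bit number, and the dataflow passes combine those numbers in fixed-size multi-word sets (Bits, with helpers blsh/bany/bnum defined in the cc headers, not shown here) using the i/32 and 1L<<(i%32) arithmetic visible in regopt and paint1. A minimal standalone analogue; the word count of 5 is an assumption standing in for the value in the deleted headers:

#include <stdio.h>
#include <stdint.h>

enum { BITS = 5 };			/* words per set: assumed, see gc.h/cc.h */

typedef struct { uint32_t b[BITS]; } Bits;

static Bits blsh(int n)			/* set containing only bit n */
{
	Bits bit = {{0}};

	bit.b[n/32] = 1u << (n%32);
	return bit;
}

static int bany(Bits *a)		/* any bit set? */
{
	int z;

	for(z = 0; z < BITS; z++)
		if(a->b[z])
			return 1;
	return 0;
}

int main(void)
{
	Bits bit = blsh(70);
	int z;

	bit.b[1] |= blsh(40).b[1];
	printf("any=%d words:", bany(&bit));
	for(z = 0; z < BITS; z++)
		printf(" %#x", bit.b[z]);
	printf("\n");
	return 0;
}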
-
-void
-prop(Reg *r, Bits ref, Bits cal)
-{
- Reg *r1, *r2;
- int z;
-
- for(r1 = r; r1 != R; r1 = r1->p1) {
- for(z=0; z<BITS; z++) {
- ref.b[z] |= r1->refahead.b[z];
- if(ref.b[z] != r1->refahead.b[z]) {
- r1->refahead.b[z] = ref.b[z];
- change++;
- }
- cal.b[z] |= r1->calahead.b[z];
- if(cal.b[z] != r1->calahead.b[z]) {
- r1->calahead.b[z] = cal.b[z];
- change++;
- }
- }
- switch(r1->prog->as) {
- case ABL:
- for(z=0; z<BITS; z++) {
- cal.b[z] |= ref.b[z] | externs.b[z];
- ref.b[z] = 0;
- }
- break;
-
- case ATEXT:
- for(z=0; z<BITS; z++) {
- cal.b[z] = 0;
- ref.b[z] = 0;
- }
- break;
-
- case ARET:
- for(z=0; z<BITS; z++) {
- cal.b[z] = externs.b[z];
- ref.b[z] = 0;
- }
- }
- for(z=0; z<BITS; z++) {
- ref.b[z] = (ref.b[z] & ~r1->set.b[z]) |
- r1->use1.b[z] | r1->use2.b[z];
- cal.b[z] &= ~(r1->set.b[z] | r1->use1.b[z] | r1->use2.b[z]);
- r1->refbehind.b[z] = ref.b[z];
- r1->calbehind.b[z] = cal.b[z];
- }
- if(r1->active)
- break;
- r1->active = 1;
- }
- for(; r != r1; r = r->p1)
- for(r2 = r->p2; r2 != R; r2 = r2->p2link)
- prop(r2, r->refbehind, r->calbehind);
-}
-
-/*
- * find looping structure
- *
- * 1) find reverse postordering
- * 2) find approximate dominators,
- * the actual dominators if the flow graph is reducible
- * otherwise, dominators plus some other non-dominators.
- * See Matthew S. Hecht and Jeffrey D. Ullman,
- * "Analysis of a Simple Algorithm for Global Data Flow Problems",
- * Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
- * Oct. 1-3, 1973, pp. 207-217.
- * 3) find all nodes with a predecessor dominated by the current node.
- * such a node is a loop head.
- * recursively, all preds with a greater rpo number are in the loop
- */
-int32
-postorder(Reg *r, Reg **rpo2r, int32 n)
-{
- Reg *r1;
-
- r->rpo = 1;
- r1 = r->s1;
- if(r1 && !r1->rpo)
- n = postorder(r1, rpo2r, n);
- r1 = r->s2;
- if(r1 && !r1->rpo)
- n = postorder(r1, rpo2r, n);
- rpo2r[n] = r;
- n++;
- return n;
-}
-
-int32
-rpolca(int32 *idom, int32 rpo1, int32 rpo2)
-{
- int32 t;
-
- if(rpo1 == -1)
- return rpo2;
- while(rpo1 != rpo2){
- if(rpo1 > rpo2){
- t = rpo2;
- rpo2 = rpo1;
- rpo1 = t;
- }
- while(rpo1 < rpo2){
- t = idom[rpo2];
- if(t >= rpo2)
- fatal(Z, "bad idom");
- rpo2 = t;
- }
- }
- return rpo1;
-}
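rpolca above is the usual "walk the deeper node up its immediate-dominator chain until the two meet" step that loopit uses to build the idom array over reverse-postorder numbers; loophead then flags any node that dominates one of its own predecessors as a loop head. A toy version of the same walk over a hand-built idom table (a small assumed CFG, not the real Reg graph):

#include <stdio.h>

/* Walk two RPO-numbered nodes up the idom chain until they meet, as rpolca does. */
static int common_dom(const int *idom, int a, int b)
{
	while(a != b) {
		while(a > b)
			a = idom[a];
		while(b > a)
			b = idom[b];
	}
	return a;
}

int main(void)
{
	/*
	 * A diamond with a tail, numbered in reverse postorder:
	 *   0 -> 1, 2;  1 -> 3;  2 -> 3;  3 -> 4
	 * so idom[1] = idom[2] = idom[3] = 0 and idom[4] = 3.
	 */
	int idom[] = { 0, 0, 0, 0, 3 };

	printf("common dominator of 1 and 2: %d\n", common_dom(idom, 1, 2));
	printf("common dominator of 2 and 4: %d\n", common_dom(idom, 2, 4));
	return 0;
}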
-
-int
-doms(int32 *idom, int32 r, int32 s)
-{
- while(s > r)
- s = idom[s];
- return s == r;
-}
-
-int
-loophead(int32 *idom, Reg *r)
-{
- int32 src;
-
- src = r->rpo;
- if(r->p1 != R && doms(idom, src, r->p1->rpo))
- return 1;
- for(r = r->p2; r != R; r = r->p2link)
- if(doms(idom, src, r->rpo))
- return 1;
- return 0;
-}
-
-void
-loopmark(Reg **rpo2r, int32 head, Reg *r)
-{
- if(r->rpo < head || r->active == head)
- return;
- r->active = head;
- r->loop += LOOP;
- if(r->p1 != R)
- loopmark(rpo2r, head, r->p1);
- for(r = r->p2; r != R; r = r->p2link)
- loopmark(rpo2r, head, r);
-}
-
-void
-loopit(Reg *r, int32 nr)
-{
- Reg *r1;
- int32 i, d, me;
-
- if(nr > maxnr) {
- rpo2r = alloc(nr * sizeof(Reg*));
- idom = alloc(nr * sizeof(int32));
- maxnr = nr;
- }
- d = postorder(r, rpo2r, 0);
- if(d > nr)
- fatal(Z, "too many reg nodes");
- nr = d;
- for(i = 0; i < nr / 2; i++){
- r1 = rpo2r[i];
- rpo2r[i] = rpo2r[nr - 1 - i];
- rpo2r[nr - 1 - i] = r1;
- }
- for(i = 0; i < nr; i++)
- rpo2r[i]->rpo = i;
-
- idom[0] = 0;
- for(i = 0; i < nr; i++){
- r1 = rpo2r[i];
- me = r1->rpo;
- d = -1;
- if(r1->p1 != R && r1->p1->rpo < me)
- d = r1->p1->rpo;
- for(r1 = r1->p2; r1 != nil; r1 = r1->p2link)
- if(r1->rpo < me)
- d = rpolca(idom, d, r1->rpo);
- idom[i] = d;
- }
-
- for(i = 0; i < nr; i++){
- r1 = rpo2r[i];
- r1->loop++;
- if(r1->p2 != R && loophead(idom, r1))
- loopmark(rpo2r, i, r1);
- }
-}
-
-void
-synch(Reg *r, Bits dif)
-{
- Reg *r1;
- int z;
-
- for(r1 = r; r1 != R; r1 = r1->s1) {
- for(z=0; z<BITS; z++) {
- dif.b[z] = (dif.b[z] &
- ~(~r1->refbehind.b[z] & r1->refahead.b[z])) |
- r1->set.b[z] | r1->regdiff.b[z];
- if(dif.b[z] != r1->regdiff.b[z]) {
- r1->regdiff.b[z] = dif.b[z];
- change++;
- }
- }
- if(r1->active)
- break;
- r1->active = 1;
- for(z=0; z<BITS; z++)
- dif.b[z] &= ~(~r1->calbehind.b[z] & r1->calahead.b[z]);
- if(r1->s2 != R)
- synch(r1->s2, dif);
- }
-}
-
-uint32
-allreg(uint32 b, Rgn *r)
-{
- Var *v;
- int i;
-
- v = var + r->varno;
- r->regno = 0;
- switch(v->etype) {
-
- default:
- diag(Z, "unknown etype %d/%d", bitno(b), v->etype);
- break;
-
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- case TARRAY:
- i = BtoR(~b);
- if(i && r->cost >= 0) {
- r->regno = i;
- return RtoB(i);
- }
- break;
-
- case TVLONG:
- case TDOUBLE:
- case TFLOAT:
- i = BtoF(~b);
- if(i && r->cost >= 0) {
- r->regno = i+NREG;
- return FtoB(i);
- }
- break;
- }
- return 0;
-}
-
-void
-paint1(Reg *r, int bn)
-{
- Reg *r1;
- Prog *p;
- int z;
- uint32 bb;
-
- z = bn/32;
- bb = 1L<<(bn%32);
- if(r->act.b[z] & bb)
- return;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(r1->act.b[z] & bb)
- break;
- r = r1;
- }
-
- if(LOAD(r) & ~(r->set.b[z] & ~(r->use1.b[z]|r->use2.b[z])) & bb) {
- change -= CLOAD * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\td %B $%d\n", r->loop,
- r->prog, blsh(bn), change);
- }
- for(;;) {
- r->act.b[z] |= bb;
- p = r->prog;
-
- if(r->use1.b[z] & bb) {
- change += CREF * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\tu1 %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if((r->use2.b[z]|r->set.b[z]) & bb) {
- change += CREF * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\tu2 %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if(STORE(r) & r->regdiff.b[z] & bb) {
- change -= CLOAD * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\tst %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- paint1(r1, bn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- paint1(r1, bn);
- r = r->s1;
- if(r == R)
- break;
- if(r->act.b[z] & bb)
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-}
-
-uint32
-paint2(Reg *r, int bn)
-{
- Reg *r1;
- int z;
- uint32 bb, vreg;
-
- z = bn/32;
- bb = 1L << (bn%32);
- vreg = regbits;
- if(!(r->act.b[z] & bb))
- return vreg;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(!(r1->act.b[z] & bb))
- break;
- r = r1;
- }
- for(;;) {
- r->act.b[z] &= ~bb;
-
- vreg |= r->regu;
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- vreg |= paint2(r1, bn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- vreg |= paint2(r1, bn);
- r = r->s1;
- if(r == R)
- break;
- if(!(r->act.b[z] & bb))
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
- return vreg;
-}
-
-void
-paint3(Reg *r, int bn, int32 rb, int rn)
-{
- Reg *r1;
- Prog *p;
- int z;
- uint32 bb;
-
- z = bn/32;
- bb = 1L << (bn%32);
- if(r->act.b[z] & bb)
- return;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(r1->act.b[z] & bb)
- break;
- r = r1;
- }
-
- if(LOAD(r) & ~(r->set.b[z] & ~(r->use1.b[z]|r->use2.b[z])) & bb)
- addmove(r, bn, rn, 0);
- for(;;) {
- r->act.b[z] |= bb;
- p = r->prog;
-
- if(r->use1.b[z] & bb) {
- if(debug['R'])
- print("%P", p);
- addreg(&p->from, rn);
- if(debug['R'])
- print("\t.c%P\n", p);
- }
- if((r->use2.b[z]|r->set.b[z]) & bb) {
- if(debug['R'])
- print("%P", p);
- addreg(&p->to, rn);
- if(debug['R'])
- print("\t.c%P\n", p);
- }
-
- if(STORE(r) & r->regdiff.b[z] & bb)
- addmove(r, bn, rn, 1);
- r->regu |= rb;
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- paint3(r1, bn, rb, rn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- paint3(r1, bn, rb, rn);
- r = r->s1;
- if(r == R)
- break;
- if(r->act.b[z] & bb)
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-}
-
-void
-addreg(Addr *a, int rn)
-{
-
- a->sym = 0;
- a->name = D_NONE;
- a->type = D_REG;
- a->reg = rn;
- if(rn >= NREG) {
- a->type = D_FREG;
- a->reg = rn-NREG;
- }
-}
-
-/*
- * bit reg
- * 0 R0
- * 1 R1
- * ... ...
- * 10 R10
- * 12 R12
- */
-int32
-RtoB(int r)
-{
-
- if(r < 2 || (r >= REGTMP-2 && r != 12)) // excluded R9 and R10 for m and g, but not R12
- return 0;
- return 1L << r;
-}
-
-int
-BtoR(int32 b)
-{
- b &= 0x11fcL; // excluded R9 and R10 for m and g, but not R12
- if(b == 0)
- return 0;
- return bitno(b);
-}
-
-/*
- * bit reg
- * 18 F2
- * 19 F3
- * ... ...
- * 31 F15
- */
-int32
-FtoB(int f)
-{
-
- if(f < 2 || f > NFREG-1)
- return 0;
- return 1L << (f + 16);
-}
-
-int
-BtoF(int32 b)
-{
-
- b &= 0xfffc0000L;
- if(b == 0)
- return 0;
- return bitno(b) - 16;
-}
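RtoB/BtoR and FtoB/BtoF above map the allocatable registers onto bits of one 32-bit mask: integer registers R2-R8 and R12 take bits 2-8 and 12 (hence the 0x11fc mask, with R9-R11 kept out of the allocator), and floating registers F2-F15 take bits 18-31 (0xfffc0000). A small check of those masks; the explicit numeric cutoffs below stand in for the REGTMP/NFREG constants used in the original:

#include <stdio.h>
#include <stdint.h>

static uint32_t rtob(int r)	/* integer register -> bit, as RtoB */
{
	if(r < 2 || (r > 8 && r != 12))
		return 0;
	return 1u << r;
}

static uint32_t ftob(int f)	/* float register -> bit, as FtoB */
{
	if(f < 2 || f > 15)
		return 0;
	return 1u << (f + 16);
}

int main(void)
{
	uint32_t ib = 0, fb = 0;
	int r;

	for(r = 0; r < 16; r++) {
		ib |= rtob(r);
		fb |= ftob(r);
	}
	printf("integer mask %#x (expect 0x11fc)\n", ib);
	printf("float mask   %#x (expect 0xfffc0000)\n", fb);
	return 0;
}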
diff --git a/src/cmd/5c/sgen.c b/src/cmd/5c/sgen.c
deleted file mode 100644
index a36612caa..000000000
--- a/src/cmd/5c/sgen.c
+++ /dev/null
@@ -1,265 +0,0 @@
-// Inferno utils/5c/sgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/sgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-
-#include "gc.h"
-
-Prog*
-gtext(Sym *s, int32 stkoff)
-{
- int32 a;
-
- a = argsize(1);
- if((textflag & NOSPLIT) != 0 && stkoff >= 128)
- yyerror("stack frame too large for NOSPLIT function");
-
- gpseudo(ATEXT, s, nodconst(stkoff));
- p->to.type = D_CONST2;
- p->to.offset2 = a;
- return p;
-}
-
-void
-noretval(int n)
-{
-
- if(n & 1) {
- gins(ANOP, Z, Z);
- p->to.type = D_REG;
- p->to.reg = REGRET;
- }
- if(n & 2) {
- gins(ANOP, Z, Z);
- p->to.type = D_FREG;
- p->to.reg = FREGRET;
- }
-}
-
-/*
- * calculate addressability as follows
- * CONST ==> 20 $value
- * NAME ==> 10 name
- * REGISTER ==> 11 register
- * INDREG ==> 12 *[(reg)+offset]
- * &10 ==> 2 $name
- * ADD(2, 20) ==> 2 $name+offset
- * ADD(3, 20) ==> 3 $(reg)+offset
- * &12 ==> 3 $(reg)+offset
- * *11 ==> 11 ??
- * *2 ==> 10 name
- * *3 ==> 12 *(reg)+offset
- * calculate complexity (number of registers)
- */
-void
-xcom(Node *n)
-{
- Node *l, *r;
- int t;
-
- if(n == Z)
- return;
- l = n->left;
- r = n->right;
- n->addable = 0;
- n->complex = 0;
- switch(n->op) {
- case OCONST:
- n->addable = 20;
- return;
-
- case OREGISTER:
- n->addable = 11;
- return;
-
- case OINDREG:
- n->addable = 12;
- return;
-
- case ONAME:
- n->addable = 10;
- return;
-
- case OADDR:
- xcom(l);
- if(l->addable == 10)
- n->addable = 2;
- if(l->addable == 12)
- n->addable = 3;
- break;
-
- case OIND:
- xcom(l);
- if(l->addable == 11)
- n->addable = 12;
- if(l->addable == 3)
- n->addable = 12;
- if(l->addable == 2)
- n->addable = 10;
- break;
-
- case OADD:
- xcom(l);
- xcom(r);
- if(l->addable == 20) {
- if(r->addable == 2)
- n->addable = 2;
- if(r->addable == 3)
- n->addable = 3;
- }
- if(r->addable == 20) {
- if(l->addable == 2)
- n->addable = 2;
- if(l->addable == 3)
- n->addable = 3;
- }
- break;
-
- case OASLMUL:
- case OASMUL:
- xcom(l);
- xcom(r);
- t = vlog(r);
- if(t >= 0) {
- n->op = OASASHL;
- r->vconst = t;
- r->type = types[TINT];
- }
- break;
-
- case OMUL:
- case OLMUL:
- xcom(l);
- xcom(r);
- t = vlog(r);
- if(t >= 0) {
- n->op = OASHL;
- r->vconst = t;
- r->type = types[TINT];
- }
- t = vlog(l);
- if(t >= 0) {
- n->op = OASHL;
- n->left = r;
- n->right = l;
- r = l;
- l = n->left;
- r->vconst = t;
- r->type = types[TINT];
- }
- break;
-
- case OASLDIV:
- xcom(l);
- xcom(r);
- t = vlog(r);
- if(t >= 0) {
- n->op = OASLSHR;
- r->vconst = t;
- r->type = types[TINT];
- }
- break;
-
- case OLDIV:
- xcom(l);
- xcom(r);
- t = vlog(r);
- if(t >= 0) {
- n->op = OLSHR;
- r->vconst = t;
- r->type = types[TINT];
- }
- break;
-
- case OASLMOD:
- xcom(l);
- xcom(r);
- t = vlog(r);
- if(t >= 0) {
- n->op = OASAND;
- r->vconst--;
- }
- break;
-
- case OLMOD:
- xcom(l);
- xcom(r);
- t = vlog(r);
- if(t >= 0) {
- n->op = OAND;
- r->vconst--;
- }
- break;
-
- default:
- if(l != Z)
- xcom(l);
- if(r != Z)
- xcom(r);
- break;
- }
- if(n->addable >= 10)
- return;
-
- if(l != Z)
- n->complex = l->complex;
- if(r != Z) {
- if(r->complex == n->complex)
- n->complex = r->complex+1;
- else
- if(r->complex > n->complex)
- n->complex = r->complex;
- }
- if(n->complex == 0)
- n->complex++;
-
- if(com64(n))
- return;
-
- switch(n->op) {
- case OFUNC:
- n->complex = FNX;
- break;
-
- case OADD:
- case OXOR:
- case OAND:
- case OOR:
- case OEQ:
- case ONE:
- /*
- * immediate operators, make const on right
- */
- if(l->op == OCONST) {
- n->left = r;
- n->right = l;
- }
- break;
- }
-}
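xcom above strength-reduces multiplication, unsigned division, and unsigned modulus by a power of two into shifts and masks (OASHL, OLSHR, OAND with vconst-1) whenever vlog recognizes the constant. The shift and mask forms are only safe as written for unsigned operands, which is why the rewrites target the L-prefixed ops; a minimal illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t x = 1234;
	int k = 3;			/* multiplying/dividing by 8 = 1<<3 */

	printf("x*8 = %u  via shift: %u\n", x*8, x << k);
	printf("x/8 = %u  via shift: %u\n", x/8, x >> k);
	printf("x%%8 = %u  via mask:  %u\n", x%8, x & ((1u << k) - 1));
	return 0;
}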
diff --git a/src/cmd/5c/swt.c b/src/cmd/5c/swt.c
deleted file mode 100644
index f39963b8f..000000000
--- a/src/cmd/5c/swt.c
+++ /dev/null
@@ -1,461 +0,0 @@
-// Inferno utils/5c/swt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/swt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-void
-swit1(C1 *q, int nc, int32 def, Node *n)
-{
- Node nreg;
-
- if(typev[n->type->etype]) {
- regsalloc(&nreg, n);
- nreg.type = types[TVLONG];
- cgen(n, &nreg);
- swit2(q, nc, def, &nreg);
- return;
- }
-
- regalloc(&nreg, n, Z);
- nreg.type = types[TLONG];
- cgen(n, &nreg);
- swit2(q, nc, def, &nreg);
- regfree(&nreg);
-}
-
-void
-swit2(C1 *q, int nc, int32 def, Node *n)
-{
- C1 *r;
- int i;
- int32 v;
- Prog *sp;
-
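-	/*
-	 * three strategies: a dense run of values (range < 2*nc,
-	 * and not NaCl) becomes an OCASE/ABCASE jump table, fewer
-	 * than five cases become a chain of compares, and anything
-	 * else is split at the middle and searched recursively.
-	 */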
- if(nc >= 3) {
- i = (q+nc-1)->val - (q+0)->val;
- if(!nacl && i > 0 && i < nc*2)
- goto direct;
- }
- if(nc < 5) {
- for(i=0; i<nc; i++) {
- if(debug['W'])
- print("case = %.8ux\n", q->val);
- gopcode(OEQ, nodconst(q->val), n, Z);
- patch(p, q->label);
- q++;
- }
- gbranch(OGOTO);
- patch(p, def);
- return;
- }
-
- i = nc / 2;
- r = q+i;
- if(debug['W'])
- print("case > %.8ux\n", r->val);
- gopcode(OGT, nodconst(r->val), n, Z);
- sp = p;
- gopcode(OEQ, nodconst(r->val), n, Z); /* just gen the B.EQ */
- patch(p, r->label);
- swit2(q, i, def, n);
-
- if(debug['W'])
- print("case < %.8ux\n", r->val);
- patch(sp, pc);
- swit2(r+1, nc-i-1, def, n);
- return;
-
-direct:
- v = q->val;
- if(v != 0)
- gopcode(OSUB, nodconst(v), Z, n);
- gopcode(OCASE, nodconst((q+nc-1)->val - v), n, Z);
- patch(p, def);
- for(i=0; i<nc; i++) {
- if(debug['W'])
- print("case = %.8ux\n", q->val);
- while(q->val != v) {
- nextpc();
- p->as = ABCASE;
- patch(p, def);
- v++;
- }
- nextpc();
- p->as = ABCASE;
- patch(p, q->label);
- q++;
- v++;
- }
- gbranch(OGOTO); /* so that regopt() won't be confused */
- patch(p, def);
-}
-
-void
-bitload(Node *b, Node *n1, Node *n2, Node *n3, Node *nn)
-{
- int sh;
- int32 v;
- Node *l;
-
- /*
- * n1 gets adjusted/masked value
- * n2 gets address of cell
- * n3 gets contents of cell
- */
- l = b->left;
- if(n2 != Z) {
- regalloc(n1, l, nn);
- reglcgen(n2, l, Z);
- regalloc(n3, l, Z);
- gopcode(OAS, n2, Z, n3);
- gopcode(OAS, n3, Z, n1);
- } else {
- regalloc(n1, l, nn);
- cgen(l, n1);
- }
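-	/*
-	 * extract the field: an unsigned field starting at bit 0
-	 * needs only a mask; otherwise shift it to the top of the
-	 * word and back down so it is sign- or zero-extended.
-	 */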
- if(b->type->shift == 0 && typeu[b->type->etype]) {
- v = ~0 + (1L << b->type->nbits);
- gopcode(OAND, nodconst(v), Z, n1);
- } else {
- sh = 32 - b->type->shift - b->type->nbits;
- if(sh > 0)
- gopcode(OASHL, nodconst(sh), Z, n1);
- sh += b->type->shift;
- if(sh > 0)
- if(typeu[b->type->etype])
- gopcode(OLSHR, nodconst(sh), Z, n1);
- else
- gopcode(OASHR, nodconst(sh), Z, n1);
- }
-}
-
-void
-bitstore(Node *b, Node *n1, Node *n2, Node *n3, Node *nn)
-{
- int32 v;
- Node nod, *l;
- int sh;
-
- /*
- * n1 has adjusted/masked value
- * n2 has address of cell
- * n3 has contents of cell
- */
- l = b->left;
- regalloc(&nod, l, Z);
- v = ~0 + (1L << b->type->nbits);
- gopcode(OAND, nodconst(v), Z, n1);
- gopcode(OAS, n1, Z, &nod);
- if(nn != Z)
- gopcode(OAS, n1, Z, nn);
- sh = b->type->shift;
- if(sh > 0)
- gopcode(OASHL, nodconst(sh), Z, &nod);
- v <<= sh;
- gopcode(OAND, nodconst(~v), Z, n3);
- gopcode(OOR, n3, Z, &nod);
- gopcode(OAS, &nod, Z, n2);
-
- regfree(&nod);
- regfree(n1);
- regfree(n2);
- regfree(n3);
-}
-
-int32
-outstring(char *s, int32 n)
-{
- int32 r;
-
- if(suppress)
- return nstring;
- r = nstring;
- while(n) {
- string[mnstring] = *s++;
- mnstring++;
- nstring++;
- if(mnstring >= NSNAME) {
- gpseudo(ADATA, symstring, nodconst(0L));
- p->from.offset += nstring - NSNAME;
- p->reg = NSNAME;
- p->to.type = D_SCONST;
- memmove(p->to.u.sval, string, NSNAME);
- mnstring = 0;
- }
- n--;
- }
- return r;
-}
-
-int
-mulcon(Node *n, Node *nn)
-{
- Node *l, *r, nod1, nod2;
- Multab *m;
- int32 v, vs;
- int o;
- char code[sizeof(m->code)+2], *p;
-
- if(typefd[n->type->etype])
- return 0;
- l = n->left;
- r = n->right;
- if(l->op == OCONST) {
- l = r;
- r = n->left;
- }
- if(r->op != OCONST)
- return 0;
- v = convvtox(r->vconst, n->type->etype);
- if(v != r->vconst) {
- if(debug['M'])
- print("%L multiply conv: %lld\n", n->lineno, r->vconst);
- return 0;
- }
- m = mulcon0(v);
- if(!m) {
- if(debug['M'])
- print("%L multiply table: %lld\n", n->lineno, r->vconst);
- return 0;
- }
- if(debug['M'] && debug['v'])
- print("%L multiply: %d\n", n->lineno, v);
-
- memmove(code, m->code, sizeof(m->code));
- code[sizeof(m->code)] = 0;
-
- p = code;
- if(p[1] == 'i')
- p += 2;
- regalloc(&nod1, n, nn);
- cgen(l, &nod1);
- vs = v;
- regalloc(&nod2, n, Z);
-
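-	/*
-	 * interpret the recipe from the multiply table: each step is
-	 * an opcode character ('+' add, '-' subtract, otherwise
-	 * 'a'+shiftcount for a left shift) followed by a digit whose
-	 * bits select nod2 (set) or nod1 (clear) as result and operands.
-	 */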
-loop:
- switch(*p) {
- case 0:
- regfree(&nod2);
- if(vs < 0) {
- gopcode(OAS, &nod1, Z, &nod1);
- gopcode(OSUB, &nod1, nodconst(0), nn);
- } else
- gopcode(OAS, &nod1, Z, nn);
- regfree(&nod1);
- return 1;
- case '+':
- o = OADD;
- goto addsub;
- case '-':
- o = OSUB;
- addsub: /* number is r,n,l */
- v = p[1] - '0';
- r = &nod1;
- if(v&4)
- r = &nod2;
- n = &nod1;
- if(v&2)
- n = &nod2;
- l = &nod1;
- if(v&1)
- l = &nod2;
- gopcode(o, l, n, r);
- break;
- default: /* op is shiftcount, number is r,l */
- v = p[1] - '0';
- r = &nod1;
- if(v&2)
- r = &nod2;
- l = &nod1;
- if(v&1)
- l = &nod2;
- v = *p - 'a';
- if(v < 0 || v >= 32) {
- diag(n, "mulcon unknown op: %c%c", p[0], p[1]);
- break;
- }
- gopcode(OASHL, nodconst(v), l, r);
- break;
- }
- p += 2;
- goto loop;
-}
-
-void
-sextern(Sym *s, Node *a, int32 o, int32 w)
-{
- int32 e, lw;
-
- for(e=0; e<w; e+=NSNAME) {
- lw = NSNAME;
- if(w-e < lw)
- lw = w-e;
- gpseudo(ADATA, s, nodconst(0));
- p->from.offset += o+e;
- p->reg = lw;
- p->to.type = D_SCONST;
- memmove(p->to.u.sval, a->cstring+e, lw);
- }
-}
-
-void
-gextern(Sym *s, Node *a, int32 o, int32 w)
-{
-
- if(a->op == OCONST && typev[a->type->etype]) {
- if(isbigendian)
- gpseudo(ADATA, s, nod32const(a->vconst>>32));
- else
- gpseudo(ADATA, s, nod32const(a->vconst));
- p->from.offset += o;
- p->reg = 4;
- if(isbigendian)
- gpseudo(ADATA, s, nod32const(a->vconst));
- else
- gpseudo(ADATA, s, nod32const(a->vconst>>32));
- p->from.offset += o + 4;
- p->reg = 4;
- return;
- }
- gpseudo(ADATA, s, a);
- p->from.offset += o;
- p->reg = w;
- if(p->to.type == D_OREG)
- p->to.type = D_CONST;
-}
-
-void
-outcode(void)
-{
- Bprint(&outbuf, "go object %s %s %s\n", getgoos(), getgoarch(), getgoversion());
- if(pragcgobuf.to > pragcgobuf.start) {
- Bprint(&outbuf, "\n");
- Bprint(&outbuf, "$$ // exports\n\n");
- Bprint(&outbuf, "$$ // local types\n\n");
- Bprint(&outbuf, "$$ // cgo\n");
- Bprint(&outbuf, "%s", fmtstrflush(&pragcgobuf));
- Bprint(&outbuf, "\n$$\n\n");
- }
- Bprint(&outbuf, "!\n");
-
- writeobj(ctxt, &outbuf);
- lastp = P;
-}
-
-int32
-align(int32 i, Type *t, int op, int32 *maxalign)
-{
- int32 o;
- Type *v;
- int w, packw;
-
- o = i;
- w = 1;
- packw = 0;
- switch(op) {
- default:
- diag(Z, "unknown align opcode %d", op);
- break;
-
- case Asu2: /* padding at end of a struct */
- w = *maxalign;
- if(w < 1)
- w = 1;
- if(packflg)
- packw = packflg;
- break;
-
- case Ael1: /* initial align of struct element */
- for(v=t; v->etype==TARRAY; v=v->link)
- ;
- if(v->etype == TSTRUCT || v->etype == TUNION)
- w = v->align;
- else {
- w = ewidth[v->etype];
- if(w == 8)
- w = 4;
- }
- if(w < 1 || w > SZ_LONG)
- fatal(Z, "align");
- if(packflg)
- packw = packflg;
- break;
-
- case Ael2: /* width of a struct element */
- o += t->width;
- break;
-
- case Aarg0: /* initial passbyptr argument in arg list */
- if(typesuv[t->etype]) {
- o = align(o, types[TIND], Aarg1, nil);
- o = align(o, types[TIND], Aarg2, nil);
- }
- break;
-
- case Aarg1: /* initial align of parameter */
- w = ewidth[t->etype];
- if(w <= 0 || w >= SZ_LONG) {
- w = SZ_LONG;
- break;
- }
- w = 1; /* little endian no adjustment */
- break;
-
- case Aarg2: /* width of a parameter */
- o += t->width;
- w = t->width;
- if(w > SZ_LONG)
- w = SZ_LONG;
- break;
-
- case Aaut3: /* total align of automatic */
- o = align(o, t, Ael2, nil);
- o = align(o, t, Ael1, nil);
- w = SZ_LONG; /* because of a pun in cc/dcl.c:contig() */
- break;
- }
- if(packw != 0 && xround(o, w) != xround(o, packw))
- diag(Z, "#pragma pack changes offset of %T", t);
- o = xround(o, w);
- if(maxalign != nil && *maxalign < w)
- *maxalign = w;
- if(debug['A'])
- print("align %s %d %T = %d\n", bnames[op], i, t, o);
- return o;
-}
-
-int32
-maxround(int32 max, int32 v)
-{
- v = xround(v, SZ_LONG);
- if(v > max)
- return v;
- return max;
-}
diff --git a/src/cmd/5c/txt.c b/src/cmd/5c/txt.c
deleted file mode 100644
index af40220cc..000000000
--- a/src/cmd/5c/txt.c
+++ /dev/null
@@ -1,1361 +0,0 @@
-// Inferno utils/5c/txt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-
-#include "gc.h"
-
-
-int thechar = '5';
-char *thestring = "arm";
-
-LinkArch *thelinkarch = &linkarm;
-
-void
-linkarchinit(void)
-{
-}
-
-void
-ginit(void)
-{
- Type *t;
-
- exregoffset = REGEXT;
- exfregoffset = FREGEXT;
- listinit();
- nstring = 0;
- mnstring = 0;
- nrathole = 0;
- pc = 0;
- breakpc = -1;
- continpc = -1;
- cases = C;
- lastp = P;
- tfield = types[TLONG];
-
- zprog.link = P;
- zprog.as = AGOK;
- zprog.reg = NREG;
- zprog.from.type = D_NONE;
- zprog.from.name = D_NONE;
- zprog.from.reg = NREG;
- zprog.to = zprog.from;
- zprog.scond = 0xE;
-
- regnode.op = OREGISTER;
- regnode.class = CEXREG;
- regnode.reg = REGTMP;
- regnode.complex = 0;
- regnode.addable = 11;
- regnode.type = types[TLONG];
-
- constnode.op = OCONST;
- constnode.class = CXXX;
- constnode.complex = 0;
- constnode.addable = 20;
- constnode.type = types[TLONG];
-
- fconstnode.op = OCONST;
- fconstnode.class = CXXX;
- fconstnode.complex = 0;
- fconstnode.addable = 20;
- fconstnode.type = types[TDOUBLE];
-
- nodsafe = new(ONAME, Z, Z);
- nodsafe->sym = slookup(".safe");
- nodsafe->type = types[TINT];
- nodsafe->etype = types[TINT]->etype;
- nodsafe->class = CAUTO;
- complex(nodsafe);
-
- t = typ(TARRAY, types[TCHAR]);
- symrathole = slookup(".rathole");
- symrathole->class = CGLOBL;
- symrathole->type = t;
-
- nodrat = new(ONAME, Z, Z);
- nodrat->sym = symrathole;
- nodrat->type = types[TIND];
- nodrat->etype = TVOID;
- nodrat->class = CGLOBL;
- complex(nodrat);
- nodrat->type = t;
-
- nodret = new(ONAME, Z, Z);
- nodret->sym = slookup(".ret");
- nodret->type = types[TIND];
- nodret->etype = TIND;
- nodret->class = CPARAM;
- nodret = new(OIND, nodret, Z);
- complex(nodret);
-
- com64init();
-
- memset(reg, 0, sizeof(reg));
-}
-
-void
-gclean(void)
-{
- int i;
- Sym *s;
-
- for(i=0; i<NREG; i++)
- if(reg[i])
- diag(Z, "reg %d left allocated", i);
- for(i=NREG; i<NREG+NFREG; i++)
- if(reg[i])
- diag(Z, "freg %d left allocated", i-NREG);
- while(mnstring)
- outstring("", 1L);
- symstring->type->width = nstring;
- symrathole->type->width = nrathole;
- for(i=0; i<NHASH; i++)
- for(s = hash[i]; s != S; s = s->link) {
- if(s->type == T)
- continue;
- if(s->type->width == 0)
- continue;
- if(s->class != CGLOBL && s->class != CSTATIC)
- continue;
- if(s->type == types[TENUM])
- continue;
- gpseudo(AGLOBL, s, nodconst(s->type->width));
- }
- nextpc();
- p->as = AEND;
- outcode();
-}
-
-void
-nextpc(void)
-{
- Plist *pl;
-
- p = alloc(sizeof(*p));
- *p = zprog;
- p->lineno = nearln;
- pc++;
- if(lastp == nil) {
- pl = linknewplist(ctxt);
- pl->firstpc = p;
- } else
- lastp->link = p;
- lastp = p;
-}
-
-void
-gargs(Node *n, Node *tn1, Node *tn2)
-{
- int32 regs;
- Node fnxargs[20], *fnxp;
-
- regs = cursafe;
-
- fnxp = fnxargs;
- garg1(n, tn1, tn2, 0, &fnxp); /* compile fns to temps */
-
- curarg = 0;
- fnxp = fnxargs;
- garg1(n, tn1, tn2, 1, &fnxp); /* compile normal args and temps */
-
- cursafe = regs;
-}
-
-void
-garg1(Node *n, Node *tn1, Node *tn2, int f, Node **fnxp)
-{
- Node nod;
-
- if(n == Z)
- return;
- if(n->op == OLIST) {
- garg1(n->left, tn1, tn2, f, fnxp);
- garg1(n->right, tn1, tn2, f, fnxp);
- return;
- }
- if(f == 0) {
- if(n->complex >= FNX) {
- regsalloc(*fnxp, n);
- nod = znode;
- nod.op = OAS;
- nod.left = *fnxp;
- nod.right = n;
- nod.type = n->type;
- cgen(&nod, Z);
- (*fnxp)++;
- }
- return;
- }
- if(typesuv[n->type->etype]) {
- regaalloc(tn2, n);
- if(n->complex >= FNX) {
- sugen(*fnxp, tn2, n->type->width);
- (*fnxp)++;
- } else
- sugen(n, tn2, n->type->width);
- return;
- }
- if(REGARG >= 0 && curarg == 0 && typechlp[n->type->etype]) {
- regaalloc1(tn1, n);
- if(n->complex >= FNX) {
- cgen(*fnxp, tn1);
- (*fnxp)++;
- } else
- cgen(n, tn1);
- return;
- }
- regalloc(tn1, n, Z);
- if(n->complex >= FNX) {
- cgen(*fnxp, tn1);
- (*fnxp)++;
- } else
- cgen(n, tn1);
- regaalloc(tn2, n);
- gopcode(OAS, tn1, Z, tn2);
- regfree(tn1);
-}
-
-Node*
-nodconst(int32 v)
-{
- constnode.vconst = v;
- return &constnode;
-}
-
-Node*
-nod32const(vlong v)
-{
- constnode.vconst = v & MASK(32);
- return &constnode;
-}
-
-Node*
-nodfconst(double d)
-{
- fconstnode.fconst = d;
- return &fconstnode;
-}
-
-void
-nodreg(Node *n, Node *nn, int reg)
-{
- *n = regnode;
- n->reg = reg;
- n->type = nn->type;
- n->lineno = nn->lineno;
-}
-
-void
-regret(Node *n, Node *nn, Type *t, int mode)
-{
- int r;
-
- if(mode == 0 || hasdotdotdot(t) || nn->type->width == 0) {
- r = REGRET;
- if(typefd[nn->type->etype])
- r = FREGRET+NREG;
- nodreg(n, nn, r);
- reg[r]++;
- return;
- }
-
- if(mode == 1) {
- // fetch returned value after call.
- // already called gargs, so curarg is set.
- curarg = (curarg+3) & ~3;
- regaalloc(n, nn);
- return;
- }
-
- if(mode == 2) {
- // store value to be returned.
- // must compute arg offset.
- if(t->etype != TFUNC)
- fatal(Z, "bad regret func %T", t);
- *n = *nn;
- n->op = ONAME;
- n->class = CPARAM;
- n->sym = slookup(".ret");
- n->complex = nodret->complex;
- n->xoffset = argsize(0);
- n->addable = 20;
- return;
- }
-
- fatal(Z, "bad regret");
-}
-
-int
-tmpreg(void)
-{
- int i;
-
- for(i=REGRET+1; i<NREG; i++)
- if(reg[i] == 0)
- return i;
- diag(Z, "out of fixed registers");
- return 0;
-}
-
-void
-regalloc(Node *n, Node *tn, Node *o)
-{
- int i;
-
- switch(tn->type->etype) {
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- if(o != Z && o->op == OREGISTER) {
- i = o->reg;
- if(i >= 0 && i < NREG)
- goto out;
- }
- for(i=REGRET+1; i<=REGEXT-2; i++)
- if(reg[i] == 0)
- goto out;
- diag(tn, "out of fixed registers");
- goto err;
-
- case TFLOAT:
- case TDOUBLE:
- case TVLONG:
- if(o != Z && o->op == OREGISTER) {
- i = o->reg;
- if(i >= NREG && i < NREG+NFREG)
- goto out;
- }
- for(i=NREG; i<NREG+NFREG; i++)
- if(reg[i] == 0)
- goto out;
- diag(tn, "out of float registers");
- goto err;
- }
- diag(tn, "unknown type in regalloc: %T", tn->type);
-err:
- nodreg(n, tn, 0);
- return;
-out:
- reg[i]++;
- nodreg(n, tn, i);
-}
-
-void
-regialloc(Node *n, Node *tn, Node *o)
-{
- Node nod;
-
- nod = *tn;
- nod.type = types[TIND];
- regalloc(n, &nod, o);
-}
-
-void
-regfree(Node *n)
-{
- int i;
-
- i = 0;
- if(n->op != OREGISTER && n->op != OINDREG)
- goto err;
- i = n->reg;
- if(i < 0 || i >= nelem(reg))
- goto err;
- if(reg[i] <= 0)
- goto err;
- reg[i]--;
- return;
-err:
- diag(n, "error in regfree: %d", i);
-}
-
-void
-regsalloc(Node *n, Node *nn)
-{
- cursafe = align(cursafe, nn->type, Aaut3, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
- *n = *nodsafe;
- n->xoffset = -(stkoff + cursafe);
- n->type = nn->type;
- n->etype = nn->type->etype;
- n->lineno = nn->lineno;
-}
-
-void
-regaalloc1(Node *n, Node *nn)
-{
- if(REGARG < 0) {
- fatal(n, "regaalloc1 and REGARG<0");
- return;
- }
- nodreg(n, nn, REGARG);
- reg[REGARG]++;
- curarg = align(curarg, nn->type, Aarg1, nil);
- curarg = align(curarg, nn->type, Aarg2, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
-}
-
-void
-regaalloc(Node *n, Node *nn)
-{
- curarg = align(curarg, nn->type, Aarg1, nil);
- *n = *nn;
- n->op = OINDREG;
- n->reg = REGSP;
- n->xoffset = curarg + SZ_LONG;
- n->complex = 0;
- n->addable = 20;
- curarg = align(curarg, nn->type, Aarg2, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
-}
-
-void
-regind(Node *n, Node *nn)
-{
-
- if(n->op != OREGISTER) {
- diag(n, "regind not OREGISTER");
- return;
- }
- n->op = OINDREG;
- n->type = nn->type;
-}
-
-void
-raddr(Node *n, Prog *p)
-{
- Addr a;
-
- naddr(n, &a);
- if(R0ISZERO && a.type == D_CONST && a.offset == 0) {
- a.type = D_REG;
- a.reg = 0;
- }
- if(a.type != D_REG && a.type != D_FREG) {
- if(n)
- diag(n, "bad in raddr: %O", n->op);
- else
- diag(n, "bad in raddr: <null>");
- p->reg = NREG;
- } else
- p->reg = a.reg;
-}
-
-void
-naddr(Node *n, Addr *a)
-{
- int32 v;
-
- a->type = D_NONE;
- if(n == Z)
- return;
- switch(n->op) {
- default:
- bad:
- diag(n, "bad in naddr: %O", n->op);
- break;
-
- case OREGISTER:
- a->type = D_REG;
- a->sym = nil;
- a->reg = n->reg;
- if(a->reg >= NREG) {
- a->type = D_FREG;
- a->reg -= NREG;
- }
- break;
-
- case OIND:
- naddr(n->left, a);
- if(a->type == D_REG) {
- a->type = D_OREG;
- break;
- }
- if(a->type == D_CONST) {
- a->type = D_OREG;
- break;
- }
- goto bad;
-
- case OINDREG:
- a->type = D_OREG;
- a->sym = nil;
- a->offset = n->xoffset;
- a->reg = n->reg;
- break;
-
- case ONAME:
- a->etype = n->etype;
- a->type = D_OREG;
- a->name = D_STATIC;
- a->sym = linksym(n->sym);
- a->offset = n->xoffset;
- if(n->class == CSTATIC)
- break;
- if(n->class == CEXTERN || n->class == CGLOBL) {
- a->name = D_EXTERN;
- break;
- }
- if(n->class == CAUTO) {
- a->name = D_AUTO;
- break;
- }
- if(n->class == CPARAM) {
- a->name = D_PARAM;
- break;
- }
- goto bad;
-
- case OCONST:
- a->sym = nil;
- a->reg = NREG;
- if(typefd[n->type->etype]) {
- a->type = D_FCONST;
- a->u.dval = n->fconst;
- } else {
- a->type = D_CONST;
- a->offset = n->vconst;
- }
- break;
-
- case OADDR:
- naddr(n->left, a);
- if(a->type == D_OREG) {
- a->type = D_CONST;
- break;
- }
- goto bad;
-
- case OADD:
- if(n->left->op == OCONST) {
- naddr(n->left, a);
- v = a->offset;
- naddr(n->right, a);
- } else {
- naddr(n->right, a);
- v = a->offset;
- naddr(n->left, a);
- }
- a->offset += v;
- break;
-
- }
-}
-
-void
-fop(int as, int f1, int f2, Node *t)
-{
- Node nod1, nod2, nod3;
-
- nodreg(&nod1, t, NREG+f1);
- nodreg(&nod2, t, NREG+f2);
- regalloc(&nod3, t, t);
- gopcode(as, &nod1, &nod2, &nod3);
- gmove(&nod3, t);
- regfree(&nod3);
-}
-
-void
-gmovm(Node *f, Node *t, int w)
-{
- gins(AMOVM, f, t);
- p->scond |= C_UBIT;
- if(w)
- p->scond |= C_WBIT;
-}
-
-void
-gmove(Node *f, Node *t)
-{
- int ft, tt, a;
- Node nod, nod1;
- Prog *p1;
-
- ft = f->type->etype;
- tt = t->type->etype;
-
- if(ft == TDOUBLE && f->op == OCONST) {
- }
- if(ft == TFLOAT && f->op == OCONST) {
- }
-
- /*
- * a load --
- * put it into a register then
- * worry what to do with it.
- */
- if(f->op == ONAME || f->op == OINDREG || f->op == OIND) {
- switch(ft) {
- default:
- a = AMOVW;
- break;
- case TFLOAT:
- a = AMOVF;
- break;
- case TDOUBLE:
- a = AMOVD;
- break;
- case TCHAR:
- a = AMOVBS;
- break;
- case TUCHAR:
- a = AMOVBU;
- break;
- case TSHORT:
- a = AMOVHS;
- break;
- case TUSHORT:
- a = AMOVHU;
- break;
- }
- if(typechlp[ft] && typeilp[tt])
- regalloc(&nod, t, t);
- else
- regalloc(&nod, f, t);
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
- }
-
- /*
- * a store --
- * put it into a register then
- * store it.
- */
- if(t->op == ONAME || t->op == OINDREG || t->op == OIND) {
- switch(tt) {
- default:
- a = AMOVW;
- break;
- case TUCHAR:
- a = AMOVBU;
- break;
- case TCHAR:
- a = AMOVBS;
- break;
- case TUSHORT:
- a = AMOVHU;
- break;
- case TSHORT:
- a = AMOVHS;
- break;
- case TFLOAT:
- a = AMOVF;
- break;
- case TVLONG:
- case TDOUBLE:
- a = AMOVD;
- break;
- }
- if(ft == tt)
- regalloc(&nod, t, f);
- else
- regalloc(&nod, t, Z);
- gmove(f, &nod);
- gins(a, &nod, t);
- regfree(&nod);
- return;
- }
-
- /*
- * type x type cross table
- */
- a = AGOK;
- switch(ft) {
- case TDOUBLE:
- case TVLONG:
- case TFLOAT:
- switch(tt) {
- case TDOUBLE:
- case TVLONG:
- a = AMOVD;
- if(ft == TFLOAT)
- a = AMOVFD;
- break;
- case TFLOAT:
- a = AMOVDF;
- if(ft == TFLOAT)
- a = AMOVF;
- break;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- a = AMOVDW;
- if(ft == TFLOAT)
- a = AMOVFW;
- break;
- case TSHORT:
- case TUSHORT:
- case TCHAR:
- case TUCHAR:
- a = AMOVDW;
- if(ft == TFLOAT)
- a = AMOVFW;
- break;
- }
- break;
- case TUINT:
- case TULONG:
- if(tt == TFLOAT || tt == TDOUBLE) {
- // ugly and probably longer than necessary,
- // but vfp has a single instruction for this,
- // so hopefully it won't last long.
- //
- // tmp = f
- // tmp1 = tmp & 0x80000000
- // tmp ^= tmp1
- // t = float(int32(tmp))
- // if(tmp1)
- // t += 2147483648.
- //
- regalloc(&nod, f, Z);
- regalloc(&nod1, f, Z);
- gins(AMOVW, f, &nod);
- gins(AMOVW, &nod, &nod1);
- gins(AAND, nodconst(0x80000000), &nod1);
- gins(AEOR, &nod1, &nod);
- if(tt == TFLOAT)
- gins(AMOVWF, &nod, t);
- else
- gins(AMOVWD, &nod, t);
- gins(ACMP, nodconst(0), Z);
- raddr(&nod1, p);
- gins(ABEQ, Z, Z);
- regfree(&nod);
- regfree(&nod1);
- p1 = p;
- regalloc(&nod, t, Z);
- gins(AMOVF, nodfconst(2147483648.), &nod);
- gins(AADDF, &nod, t);
- regfree(&nod);
- patch(p1, pc);
- return;
- }
- // fall through
-
- case TINT:
- case TLONG:
- case TIND:
- switch(tt) {
- case TDOUBLE:
- gins(AMOVWD, f, t);
- return;
- case TFLOAT:
- gins(AMOVWF, f, t);
- return;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- case TSHORT:
- case TUSHORT:
- case TCHAR:
- case TUCHAR:
- a = AMOVW;
- break;
- }
- break;
- case TSHORT:
- switch(tt) {
- case TDOUBLE:
- regalloc(&nod, f, Z);
- gins(AMOVHS, f, &nod);
- gins(AMOVWD, &nod, t);
- regfree(&nod);
- return;
- case TFLOAT:
- regalloc(&nod, f, Z);
- gins(AMOVHS, f, &nod);
- gins(AMOVWF, &nod, t);
- regfree(&nod);
- return;
- case TUINT:
- case TINT:
- case TULONG:
- case TLONG:
- case TIND:
- a = AMOVHS;
- break;
- case TSHORT:
- case TUSHORT:
- case TCHAR:
- case TUCHAR:
- a = AMOVW;
- break;
- }
- break;
- case TUSHORT:
- switch(tt) {
- case TDOUBLE:
- regalloc(&nod, f, Z);
- gins(AMOVHU, f, &nod);
- gins(AMOVWD, &nod, t);
- regfree(&nod);
- return;
- case TFLOAT:
- regalloc(&nod, f, Z);
- gins(AMOVHU, f, &nod);
- gins(AMOVWF, &nod, t);
- regfree(&nod);
- return;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- a = AMOVHU;
- break;
- case TSHORT:
- case TUSHORT:
- case TCHAR:
- case TUCHAR:
- a = AMOVW;
- break;
- }
- break;
- case TCHAR:
- switch(tt) {
- case TDOUBLE:
- regalloc(&nod, f, Z);
- gins(AMOVBS, f, &nod);
- gins(AMOVWD, &nod, t);
- regfree(&nod);
- return;
- case TFLOAT:
- regalloc(&nod, f, Z);
- gins(AMOVBS, f, &nod);
- gins(AMOVWF, &nod, t);
- regfree(&nod);
- return;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- case TSHORT:
- case TUSHORT:
- a = AMOVBS;
- break;
- case TCHAR:
- case TUCHAR:
- a = AMOVW;
- break;
- }
- break;
- case TUCHAR:
- switch(tt) {
- case TDOUBLE:
- regalloc(&nod, f, Z);
- gins(AMOVBU, f, &nod);
- gins(AMOVWD, &nod, t);
- regfree(&nod);
- return;
- case TFLOAT:
- regalloc(&nod, f, Z);
- gins(AMOVBU, f, &nod);
- gins(AMOVWF, &nod, t);
- regfree(&nod);
- return;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- case TSHORT:
- case TUSHORT:
- a = AMOVBU;
- break;
- case TCHAR:
- case TUCHAR:
- a = AMOVW;
- break;
- }
- break;
- }
- if(a == AGOK)
- diag(Z, "bad opcode in gmove %T -> %T", f->type, t->type);
- if(a == AMOVW || a == AMOVF || a == AMOVD)
- if(samaddr(f, t))
- return;
- gins(a, f, t);
-}
-
-void
-gmover(Node *f, Node *t)
-{
- int ft, tt, a;
-
- ft = f->type->etype;
- tt = t->type->etype;
- a = AGOK;
- if(typechlp[ft] && typechlp[tt] && ewidth[ft] >= ewidth[tt]){
- switch(tt){
- case TSHORT:
- a = AMOVHS;
- break;
- case TUSHORT:
- a = AMOVHU;
- break;
- case TCHAR:
- a = AMOVBS;
- break;
- case TUCHAR:
- a = AMOVBU;
- break;
- }
- }
- if(a == AGOK)
- gmove(f, t);
- else
- gins(a, f, t);
-}
-
-void
-gins(int a, Node *f, Node *t)
-{
-
- nextpc();
- p->as = a;
- if(f != Z)
- naddr(f, &p->from);
- if(t != Z)
- naddr(t, &p->to);
- if(debug['g'])
- print("%P\n", p);
-}
-
-void
-gopcode(int o, Node *f1, Node *f2, Node *t)
-{
- int a, et;
- Addr ta;
-
- et = TLONG;
- if(f1 != Z && f1->type != T)
- et = f1->type->etype;
- a = AGOK;
- switch(o) {
- case OAS:
- gmove(f1, t);
- return;
-
- case OASADD:
- case OADD:
- a = AADD;
- if(et == TFLOAT)
- a = AADDF;
- else
- if(et == TDOUBLE || et == TVLONG)
- a = AADDD;
- break;
-
- case OASSUB:
- case OSUB:
- if(f2 && f2->op == OCONST) {
- Node *t = f1;
- f1 = f2;
- f2 = t;
- a = ARSB;
- } else
- a = ASUB;
- if(et == TFLOAT)
- a = ASUBF;
- else
- if(et == TDOUBLE || et == TVLONG)
- a = ASUBD;
- break;
-
- case OASOR:
- case OOR:
- a = AORR;
- break;
-
- case OASAND:
- case OAND:
- a = AAND;
- break;
-
- case OASXOR:
- case OXOR:
- a = AEOR;
- break;
-
- case OASLSHR:
- case OLSHR:
- a = ASRL;
- break;
-
- case OASASHR:
- case OASHR:
- a = ASRA;
- break;
-
- case OASASHL:
- case OASHL:
- a = ASLL;
- break;
-
- case OFUNC:
- a = ABL;
- break;
-
- case OASMUL:
- case OMUL:
- a = AMUL;
- if(et == TFLOAT)
- a = AMULF;
- else
- if(et == TDOUBLE || et == TVLONG)
- a = AMULD;
- break;
-
- case OASDIV:
- case ODIV:
- a = ADIV;
- if(et == TFLOAT)
- a = ADIVF;
- else
- if(et == TDOUBLE || et == TVLONG)
- a = ADIVD;
- break;
-
- case OASMOD:
- case OMOD:
- a = AMOD;
- break;
-
- case OASLMUL:
- case OLMUL:
- a = AMULU;
- break;
-
- case OASLMOD:
- case OLMOD:
- a = AMODU;
- break;
-
- case OASLDIV:
- case OLDIV:
- a = ADIVU;
- break;
-
- case OCASE:
- case OEQ:
- case ONE:
- case OLT:
- case OLE:
- case OGE:
- case OGT:
- case OLO:
- case OLS:
- case OHS:
- case OHI:
- a = ACMP;
- if(et == TFLOAT)
- a = ACMPF;
- else
- if(et == TDOUBLE || et == TVLONG)
- a = ACMPD;
- nextpc();
- p->as = a;
- naddr(f1, &p->from);
- if(a == ACMP && f1->op == OCONST && p->from.offset < 0) {
- p->as = ACMN;
- p->from.offset = -p->from.offset;
- }
- raddr(f2, p);
- switch(o) {
- case OEQ:
- a = ABEQ;
- break;
- case ONE:
- a = ABNE;
- break;
- case OLT:
- a = ABLT;
- break;
- case OLE:
- a = ABLE;
- break;
- case OGE:
- a = ABGE;
- break;
- case OGT:
- a = ABGT;
- break;
- case OLO:
- a = ABLO;
- break;
- case OLS:
- a = ABLS;
- break;
- case OHS:
- a = ABHS;
- break;
- case OHI:
- a = ABHI;
- break;
- case OCASE:
- nextpc();
- p->as = ACASE;
- p->scond = 0x9;
- naddr(f2, &p->from);
- a = ABHI;
- break;
- }
- f1 = Z;
- f2 = Z;
- break;
- }
- if(a == AGOK)
- diag(Z, "bad in gopcode %O", o);
- nextpc();
- p->as = a;
- if(f1 != Z)
- naddr(f1, &p->from);
- if(f2 != Z) {
- naddr(f2, &ta);
- p->reg = ta.reg;
- }
- if(t != Z)
- naddr(t, &p->to);
- if(debug['g'])
- print("%P\n", p);
-}
-
-int
-samaddr(Node *f, Node *t)
-{
-
- if(f->op != t->op)
- return 0;
- switch(f->op) {
-
- case OREGISTER:
- if(f->reg != t->reg)
- break;
- return 1;
- }
- return 0;
-}
-
-void
-gbranch(int o)
-{
- int a;
-
- a = AGOK;
- switch(o) {
- case ORETURN:
- a = ARET;
- break;
- case OGOTO:
- a = AB;
- break;
- }
- nextpc();
- if(a == AGOK) {
- diag(Z, "bad in gbranch %O", o);
- nextpc();
- }
- p->as = a;
-}
-
-void
-patch(Prog *op, int32 pc)
-{
-
- op->to.offset = pc;
- op->to.type = D_BRANCH;
-}
-
-void
-gpseudo(int a, Sym *s, Node *n)
-{
- nextpc();
- p->as = a;
- p->from.type = D_OREG;
- p->from.sym = linksym(s);
- p->from.name = D_EXTERN;
-
- switch(a) {
- case ATEXT:
- p->reg = textflag;
- textflag = 0;
- break;
- case AGLOBL:
- p->reg = s->dataflag;
- break;
- }
-
- if(s->class == CSTATIC)
- p->from.name = D_STATIC;
- naddr(n, &p->to);
- if(a == ADATA || a == AGLOBL)
- pc--;
-}
-
-void
-gpcdata(int index, int value)
-{
- Node n1;
-
- n1 = *nodconst(index);
- gins(APCDATA, &n1, nodconst(value));
-}
-
-void
-gprefetch(Node *n)
-{
- Node n1;
-
- regalloc(&n1, n, Z);
- gmove(n, &n1);
- n1.op = OINDREG;
- gins(APLD, &n1, Z);
- regfree(&n1);
-}
-
-int
-sconst(Node *n)
-{
- vlong vv;
-
- if(n->op == OCONST) {
- if(!typefd[n->type->etype]) {
- vv = n->vconst;
- if(vv >= (vlong)(-32766) && vv < (vlong)32766)
- return 1;
- /*
- * should be specialised for constant values which will
-			 * fit in different instructions; for now, let 5l
- * sort it out
- */
- return 1;
- }
- }
- return 0;
-}
-
-int
-sval(int32 v)
-{
- int i;
-
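-	/*
-	 * an ARM data-processing immediate is an 8-bit value rotated
-	 * right by an even amount; try all 16 rotations of v and of
-	 * its complement.
-	 */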
- for(i=0; i<16; i++) {
- if((v & ~0xff) == 0)
- return 1;
- if((~v & ~0xff) == 0)
- return 1;
- v = (v<<2) | ((uint32)v>>30);
- }
- return 0;
-}
-
-int32
-exreg(Type *t)
-{
- int32 o;
-
- if(typechlp[t->etype]) {
- if(exregoffset <= REGEXT-4)
- return 0;
- o = exregoffset;
- exregoffset--;
- return o;
- }
- if(typefd[t->etype]) {
- if(exfregoffset <= NFREG-1)
- return 0;
- o = exfregoffset + NREG;
- exfregoffset--;
- return o;
- }
- return 0;
-}
-
-schar ewidth[NTYPE] =
-{
- -1, /* [TXXX] */
- SZ_CHAR, /* [TCHAR] */
- SZ_CHAR, /* [TUCHAR] */
- SZ_SHORT, /* [TSHORT] */
- SZ_SHORT, /* [TUSHORT] */
- SZ_INT, /* [TINT] */
- SZ_INT, /* [TUINT] */
- SZ_LONG, /* [TLONG] */
- SZ_LONG, /* [TULONG] */
- SZ_VLONG, /* [TVLONG] */
- SZ_VLONG, /* [TUVLONG] */
- SZ_FLOAT, /* [TFLOAT] */
- SZ_DOUBLE, /* [TDOUBLE] */
- SZ_IND, /* [TIND] */
- 0, /* [TFUNC] */
- -1, /* [TARRAY] */
- 0, /* [TVOID] */
- -1, /* [TSTRUCT] */
- -1, /* [TUNION] */
- SZ_INT, /* [TENUM] */
-};
-
-int32 ncast[NTYPE] =
-{
- 0, /* [TXXX] */
- BCHAR|BUCHAR, /* [TCHAR] */
- BCHAR|BUCHAR, /* [TUCHAR] */
- BSHORT|BUSHORT, /* [TSHORT] */
- BSHORT|BUSHORT, /* [TUSHORT] */
- BINT|BUINT|BLONG|BULONG|BIND, /* [TINT] */
- BINT|BUINT|BLONG|BULONG|BIND, /* [TUINT] */
- BINT|BUINT|BLONG|BULONG|BIND, /* [TLONG] */
- BINT|BUINT|BLONG|BULONG|BIND, /* [TULONG] */
- BVLONG|BUVLONG, /* [TVLONG] */
- BVLONG|BUVLONG, /* [TUVLONG] */
- BFLOAT, /* [TFLOAT] */
- BDOUBLE, /* [TDOUBLE] */
- BLONG|BULONG|BIND, /* [TIND] */
- 0, /* [TFUNC] */
- 0, /* [TARRAY] */
- 0, /* [TVOID] */
- BSTRUCT, /* [TSTRUCT] */
- BUNION, /* [TUNION] */
- 0, /* [TENUM] */
-};
diff --git a/src/cmd/6c/Makefile b/src/cmd/6c/Makefile
deleted file mode 100644
index 3f528d751..000000000
--- a/src/cmd/6c/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright 2012 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.dist
diff --git a/src/cmd/6c/cgen.c b/src/cmd/6c/cgen.c
deleted file mode 100644
index 68dd7bb5f..000000000
--- a/src/cmd/6c/cgen.c
+++ /dev/null
@@ -1,2046 +0,0 @@
-// Inferno utils/6c/cgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/cgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-#include "../../runtime/funcdata.h"
-
-/* ,x/^(print|prtree)\(/i/\/\/ */
-int castup(Type*, Type*);
-int vaddr(Node *n, int a);
-
-void
-cgen(Node *n, Node *nn)
-{
- Node *l, *r, *t;
- Prog *p1;
- Node nod, nod1, nod2, nod3, nod4;
- int o, hardleft;
- int32 v, curs;
- vlong c;
-
- if(debug['g']) {
- prtree(nn, "cgen lhs");
- prtree(n, "cgen");
- }
- if(n == Z || n->type == T)
- return;
- if(typesu[n->type->etype] && (n->op != OFUNC || nn != Z)) {
- sugen(n, nn, n->type->width);
- return;
- }
- l = n->left;
- r = n->right;
- o = n->op;
-
- if(n->op == OEXREG || (nn != Z && nn->op == OEXREG)) {
- gmove(n, nn);
- return;
- }
-
- if(n->addable >= INDEXED) {
- if(nn == Z) {
- switch(o) {
- default:
- nullwarn(Z, Z);
- break;
- case OINDEX:
- nullwarn(l, r);
- break;
- }
- return;
- }
- gmove(n, nn);
- return;
- }
- curs = cursafe;
-
- if(l->complex >= FNX)
- if(r != Z && r->complex >= FNX)
- switch(o) {
- default:
- if(cond(o) && typesu[l->type->etype])
- break;
-
- regret(&nod, r, 0, 0);
- cgen(r, &nod);
-
- regsalloc(&nod1, r);
- gmove(&nod, &nod1);
-
- regfree(&nod);
- nod = *n;
- nod.right = &nod1;
-
- cgen(&nod, nn);
- return;
-
- case OFUNC:
- case OCOMMA:
- case OANDAND:
- case OOROR:
- case OCOND:
- case ODOT:
- break;
- }
-
- hardleft = l->addable < INDEXED || l->complex >= FNX;
- switch(o) {
- default:
- diag(n, "unknown op in cgen: %O", o);
- break;
-
- case ONEG:
- case OCOM:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- gopcode(o, n->type, Z, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- case OAS:
- if(l->op == OBIT)
- goto bitas;
- if(!hardleft) {
- if(nn != Z || r->addable < INDEXED || hardconst(r)) {
- if(r->complex >= FNX && nn == Z)
- regret(&nod, r, 0, 0);
- else
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- gmove(&nod, l);
- if(nn != Z)
- gmove(&nod, nn);
- regfree(&nod);
- } else
- gmove(r, l);
- break;
- }
- if(l->complex >= r->complex) {
- if(l->op == OINDEX && immconst(r)) {
- gmove(r, l);
- break;
- }
- reglcgen(&nod1, l, Z);
- if(r->addable >= INDEXED && !hardconst(r)) {
- gmove(r, &nod1);
- if(nn != Z)
- gmove(r, nn);
- regfree(&nod1);
- break;
- }
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- } else {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- reglcgen(&nod1, l, Z);
- }
- gmove(&nod, &nod1);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- bitas:
- n = l->left;
- regalloc(&nod, r, nn);
- if(l->complex >= r->complex) {
- reglcgen(&nod1, n, Z);
- cgen(r, &nod);
- } else {
- cgen(r, &nod);
- reglcgen(&nod1, n, Z);
- }
- regalloc(&nod2, n, Z);
- gmove(&nod1, &nod2);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
-
- case OBIT:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- bitload(n, &nod, Z, Z, nn);
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- case OLSHR:
- case OASHL:
- case OASHR:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(r->op == OCONST) {
- if(r->vconst == 0) {
- cgen(l, nn);
- break;
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(o == OASHL && r->vconst == 1)
- gopcode(OADD, n->type, &nod, &nod);
- else
- gopcode(o, n->type, r, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
-
- /*
- * get nod to be D_CX
- */
- if(nodreg(&nod, nn, D_CX)) {
- regsalloc(&nod1, n);
- gmove(&nod, &nod1);
- cgen(n, &nod); /* probably a bug */
- gmove(&nod, nn);
- gmove(&nod1, &nod);
- break;
- }
- reg[D_CX]++;
- if(nn->op == OREGISTER && nn->reg == D_CX)
- regalloc(&nod1, l, Z);
- else
- regalloc(&nod1, l, nn);
- if(r->complex >= l->complex) {
- cgen(r, &nod);
- cgen(l, &nod1);
- } else {
- cgen(l, &nod1);
- cgen(r, &nod);
- }
- gopcode(o, n->type, &nod, &nod1);
- gmove(&nod1, nn);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OADD:
- case OSUB:
- case OOR:
- case OXOR:
- case OAND:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(typefd[n->type->etype])
- goto fop;
- if(r->op == OCONST) {
- if(r->vconst == 0 && o != OAND) {
- cgen(l, nn);
- break;
- }
- }
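-		/*
-		 * (x<<c) | (x>>(width-c)) on the same name is a rotate;
-		 * emit a single OROTL instead of two shifts and an or.
-		 */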
- if(n->op == OOR && l->op == OASHL && r->op == OLSHR
- && l->right->op == OCONST && r->right->op == OCONST
- && l->left->op == ONAME && r->left->op == ONAME
- && l->left->sym == r->left->sym
- && l->right->vconst + r->right->vconst == 8 * l->left->type->width) {
- regalloc(&nod, l->left, nn);
- cgen(l->left, &nod);
- gopcode(OROTL, n->type, l->right, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
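-		/*
-		 * l<<c + r with c in 1..3 becomes a scaled-index address
-		 * computation via genmuladd.
-		 */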
- if(n->op == OADD && l->op == OASHL && l->right->op == OCONST
- && (r->op != OCONST || r->vconst < -128 || r->vconst > 127)) {
- c = l->right->vconst;
- if(c > 0 && c <= 3) {
- if(l->left->complex >= r->complex) {
- regalloc(&nod, l->left, nn);
- cgen(l->left, &nod);
- if(r->addable < INDEXED) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- genmuladd(&nod, &nod, 1 << c, &nod1);
- regfree(&nod1);
- }
- else
- genmuladd(&nod, &nod, 1 << c, r);
- }
- else {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- regalloc(&nod1, l->left, Z);
- cgen(l->left, &nod1);
- genmuladd(&nod, &nod1, 1 << c, &nod);
- regfree(&nod1);
- }
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
- }
- if(r->addable >= INDEXED && !hardconst(r)) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- gopcode(o, n->type, r, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
- if(l->complex >= r->complex) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(o, n->type, &nod1, &nod);
- } else {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- regalloc(&nod, l, Z);
- cgen(l, &nod);
- gopcode(o, n->type, &nod1, &nod);
- }
- gmove(&nod, nn);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OLMOD:
- case OMOD:
- case OLMUL:
- case OLDIV:
- case OMUL:
- case ODIV:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(typefd[n->type->etype])
- goto fop;
- if(r->op == OCONST && typechl[n->type->etype]) { /* TO DO */
- SET(v);
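-			/*
-			 * strength-reduce constant right operands: divide or
-			 * mod by a power of two via sdiv2/smod2, multiply by
-			 * any constant via mulgen, and unsigned divide by a
-			 * constant with the high bit set via a single compare.
-			 */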
- switch(o) {
- case ODIV:
- case OMOD:
- c = r->vconst;
- if(c < 0)
- c = -c;
- v = xlog2(c);
- if(v < 0)
- break;
- /* fall thru */
- case OMUL:
- case OLMUL:
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- switch(o) {
- case OMUL:
- case OLMUL:
- mulgen(n->type, r, &nod);
- break;
- case ODIV:
- sdiv2(r->vconst, v, l, &nod);
- break;
- case OMOD:
- smod2(r->vconst, v, l, &nod);
- break;
- }
- gmove(&nod, nn);
- regfree(&nod);
- goto done;
- case OLDIV:
- c = r->vconst;
- if((c & 0x80000000) == 0)
- break;
- regalloc(&nod1, l, Z);
- cgen(l, &nod1);
- regalloc(&nod, l, nn);
- zeroregm(&nod);
- gins(ACMPL, &nod1, nodconst(c));
- gins(ASBBL, nodconst(-1), &nod);
- regfree(&nod1);
- gmove(&nod, nn);
- regfree(&nod);
- goto done;
- }
- }
-
- if(o == OMUL || o == OLMUL) {
- if(l->addable >= INDEXED) {
- t = l;
- l = r;
- r = t;
- }
- reg[D_DX]++; // for gopcode case OMUL
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(r->addable < INDEXED || hardconst(r)) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(OMUL, n->type, &nod1, &nod);
- regfree(&nod1);
- }else
-				gopcode(OMUL, n->type, r, &nod);	/* addressable */
- gmove(&nod, nn);
- regfree(&nod);
- reg[D_DX]--;
- break;
- }
-
- /*
- * get nod to be D_AX
- * get nod1 to be D_DX
- */
- if(nodreg(&nod, nn, D_AX)) {
- regsalloc(&nod2, n);
- gmove(&nod, &nod2);
- v = reg[D_AX];
- reg[D_AX] = 0;
-
- if(isreg(l, D_AX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_AX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod);
- reg[D_AX] = v;
- break;
- }
- if(nodreg(&nod1, nn, D_DX)) {
- regsalloc(&nod2, n);
- gmove(&nod1, &nod2);
- v = reg[D_DX];
- reg[D_DX] = 0;
-
- if(isreg(l, D_DX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_DX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod1);
- reg[D_DX] = v;
- break;
- }
- reg[D_AX]++;
-
- if(r->op == OCONST && (o == ODIV || o == OLDIV) && immconst(r) && typechl[r->type->etype]) {
- reg[D_DX]++;
- if(l->addable < INDEXED) {
- regalloc(&nod2, l, Z);
- cgen(l, &nod2);
- l = &nod2;
- }
- if(o == ODIV)
- sdivgen(l, r, &nod, &nod1);
- else
- udivgen(l, r, &nod, &nod1);
- gmove(&nod1, nn);
- if(l == &nod2)
- regfree(l);
- goto freeaxdx;
- }
-
- if(l->complex >= r->complex) {
- cgen(l, &nod);
- reg[D_DX]++;
- if(o == ODIV || o == OMOD)
- gins(typechl[l->type->etype]? ACDQ: ACQO, Z, Z);
- if(o == OLDIV || o == OLMOD)
- zeroregm(&nod1);
- if(r->addable < INDEXED || r->op == OCONST) {
- regsalloc(&nod3, r);
- cgen(r, &nod3);
- gopcode(o, n->type, &nod3, Z);
- } else
- gopcode(o, n->type, r, Z);
- } else {
- regsalloc(&nod3, r);
- cgen(r, &nod3);
- cgen(l, &nod);
- reg[D_DX]++;
- if(o == ODIV || o == OMOD)
- gins(typechl[l->type->etype]? ACDQ: ACQO, Z, Z);
- if(o == OLDIV || o == OLMOD)
- zeroregm(&nod1);
- gopcode(o, n->type, &nod3, Z);
- }
- if(o == OMOD || o == OLMOD)
- gmove(&nod1, nn);
- else
- gmove(&nod, nn);
- freeaxdx:
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OASLSHR:
- case OASASHL:
- case OASASHR:
- if(r->op == OCONST)
- goto asand;
- if(l->op == OBIT)
- goto asbitop;
- if(typefd[n->type->etype])
- goto asand; /* can this happen? */
-
- /*
- * get nod to be D_CX
- */
- if(nodreg(&nod, nn, D_CX)) {
- regsalloc(&nod1, n);
- gmove(&nod, &nod1);
- cgen(n, &nod);
- if(nn != Z)
- gmove(&nod, nn);
- gmove(&nod1, &nod);
- break;
- }
- reg[D_CX]++;
-
- if(r->complex >= l->complex) {
- cgen(r, &nod);
- if(hardleft)
- reglcgen(&nod1, l, Z);
- else
- nod1 = *l;
- } else {
- if(hardleft)
- reglcgen(&nod1, l, Z);
- else
- nod1 = *l;
- cgen(r, &nod);
- }
-
- gopcode(o, l->type, &nod, &nod1);
- regfree(&nod);
- if(nn != Z)
- gmove(&nod1, nn);
- if(hardleft)
- regfree(&nod1);
- break;
-
- case OASAND:
- case OASADD:
- case OASSUB:
- case OASXOR:
- case OASOR:
- asand:
- if(l->op == OBIT)
- goto asbitop;
- if(typefd[l->type->etype] || typefd[r->type->etype])
- goto asfop;
- if(l->complex >= r->complex) {
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- if(!immconst(r)) {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- gopcode(o, l->type, &nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(o, l->type, r, &nod);
- } else {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- gopcode(o, l->type, &nod1, &nod);
- regfree(&nod1);
- }
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod);
- break;
-
- asfop:
- if(l->complex >= r->complex) {
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- if(r->addable < INDEXED){
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- }else
- nod1 = *r;
- regalloc(&nod2, r, Z);
- gmove(&nod, &nod2);
- gopcode(o, r->type, &nod1, &nod2);
- gmove(&nod2, &nod);
- regfree(&nod2);
- if(r->addable < INDEXED)
- regfree(&nod1);
- } else {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- if(o != OASMUL && o != OASADD) {
- regalloc(&nod2, r, Z);
- gmove(&nod, &nod2);
- gopcode(o, r->type, &nod1, &nod2);
- regfree(&nod1);
- gmove(&nod2, &nod);
- regfree(&nod2);
- } else {
- gopcode(o, r->type, &nod, &nod1);
- gmove(&nod1, &nod);
- regfree(&nod1);
- }
- }
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod);
- break;
-
- case OASLMUL:
- case OASLDIV:
- case OASLMOD:
- case OASMUL:
- case OASDIV:
- case OASMOD:
- if(l->op == OBIT)
- goto asbitop;
- if(typefd[n->type->etype] || typefd[r->type->etype])
- goto asfop;
- if(r->op == OCONST && typechl[n->type->etype]) {
- SET(v);
- switch(o) {
- case OASDIV:
- case OASMOD:
- c = r->vconst;
- if(c < 0)
- c = -c;
- v = xlog2(c);
- if(v < 0)
- break;
- /* fall thru */
- case OASMUL:
- case OASLMUL:
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- regalloc(&nod, l, nn);
- cgen(&nod2, &nod);
- switch(o) {
- case OASMUL:
- case OASLMUL:
- mulgen(n->type, r, &nod);
- break;
- case OASDIV:
- sdiv2(r->vconst, v, l, &nod);
- break;
- case OASMOD:
- smod2(r->vconst, v, l, &nod);
- break;
- }
- havev:
- gmove(&nod, &nod2);
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod2);
- regfree(&nod);
- goto done;
- case OASLDIV:
- c = r->vconst;
- if((c & 0x80000000) == 0)
- break;
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- regalloc(&nod1, l, nn);
- cgen(&nod2, &nod1);
- regalloc(&nod, l, nn);
- zeroregm(&nod);
- gins(ACMPL, &nod1, nodconst(c));
- gins(ASBBL, nodconst(-1), &nod);
- regfree(&nod1);
- goto havev;
- }
- }
-
- if(o == OASMUL) {
- /* should favour AX */
- regalloc(&nod, l, nn);
- if(r->complex >= FNX) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- r = &nod1;
- }
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- cgen(&nod2, &nod);
- if(r->addable < INDEXED || hardconst(r)) {
- if(r->complex < FNX) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- }
- gopcode(OASMUL, n->type, &nod1, &nod);
- regfree(&nod1);
- }
- else
- gopcode(OASMUL, n->type, r, &nod);
- if(r == &nod1)
- regfree(r);
- gmove(&nod, &nod2);
- if(nn != Z)
- gmove(&nod, nn);
- regfree(&nod);
- if(hardleft)
- regfree(&nod2);
- break;
- }
-
- /*
- * get nod to be D_AX
- * get nod1 to be D_DX
- */
- if(nodreg(&nod, nn, D_AX)) {
- regsalloc(&nod2, n);
- gmove(&nod, &nod2);
- v = reg[D_AX];
- reg[D_AX] = 0;
-
- if(isreg(l, D_AX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_AX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod);
- reg[D_AX] = v;
- break;
- }
- if(nodreg(&nod1, nn, D_DX)) {
- regsalloc(&nod2, n);
- gmove(&nod1, &nod2);
- v = reg[D_DX];
- reg[D_DX] = 0;
-
- if(isreg(l, D_DX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_DX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod1);
- reg[D_DX] = v;
- break;
- }
- reg[D_AX]++;
- reg[D_DX]++;
-
- if(l->complex >= r->complex) {
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- cgen(&nod2, &nod);
- if(r->op == OCONST && typechl[r->type->etype]) {
- switch(o) {
- case OASDIV:
- sdivgen(&nod2, r, &nod, &nod1);
- goto divdone;
- case OASLDIV:
- udivgen(&nod2, r, &nod, &nod1);
- divdone:
- gmove(&nod1, &nod2);
- if(nn != Z)
- gmove(&nod1, nn);
- goto freelxaxdx;
- }
- }
- if(o == OASDIV || o == OASMOD)
- gins(typechl[l->type->etype]? ACDQ: ACQO, Z, Z);
- if(o == OASLDIV || o == OASLMOD)
- zeroregm(&nod1);
- if(r->addable < INDEXED || r->op == OCONST ||
- !typeil[r->type->etype]) {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- gopcode(o, l->type, &nod3, Z);
- regfree(&nod3);
- } else
- gopcode(o, n->type, r, Z);
- } else {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- cgen(&nod2, &nod);
- if(o == OASDIV || o == OASMOD)
- gins(typechl[l->type->etype]? ACDQ: ACQO, Z, Z);
- if(o == OASLDIV || o == OASLMOD)
- zeroregm(&nod1);
- gopcode(o, l->type, &nod3, Z);
- regfree(&nod3);
- }
- if(o == OASMOD || o == OASLMOD) {
- gmove(&nod1, &nod2);
- if(nn != Z)
- gmove(&nod1, nn);
- } else {
- gmove(&nod, &nod2);
- if(nn != Z)
- gmove(&nod, nn);
- }
- freelxaxdx:
- if(hardleft)
- regfree(&nod2);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- fop:
- if(l->complex >= r->complex) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(r->addable < INDEXED) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(o, n->type, &nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(o, n->type, r, &nod);
- } else {
- /* TO DO: could do better with r->addable >= INDEXED */
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- gopcode(o, n->type, &nod1, &nod);
- regfree(&nod1);
- }
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- asbitop:
- regalloc(&nod4, n, nn);
- if(l->complex >= r->complex) {
- bitload(l, &nod, &nod1, &nod2, &nod4);
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- } else {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- bitload(l, &nod, &nod1, &nod2, &nod4);
- }
- gmove(&nod, &nod4);
-
- { /* TO DO: check floating point source */
- Node onod;
-
- /* incredible grot ... */
- onod = nod3;
- onod.op = o;
- onod.complex = 2;
- onod.addable = 0;
- onod.type = tfield;
- onod.left = &nod4;
- onod.right = &nod3;
- cgen(&onod, Z);
- }
- regfree(&nod3);
- gmove(&nod4, &nod);
- regfree(&nod4);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
-
- case OADDR:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- lcgen(l, nn);
- break;
-
- case OFUNC:
- if(l->complex >= FNX) {
- if(l->op != OIND)
- diag(n, "bad function call");
-
- regret(&nod, l->left, 0, 0);
- cgen(l->left, &nod);
- regsalloc(&nod1, l->left);
- gmove(&nod, &nod1);
- regfree(&nod);
-
- nod = *n;
- nod.left = &nod2;
- nod2 = *l;
- nod2.left = &nod1;
- nod2.complex = 1;
- cgen(&nod, nn);
-
- return;
- }
- gargs(r, &nod, &nod1);
- if(l->addable < INDEXED) {
- reglcgen(&nod, l, nn);
- nod.op = OREGISTER;
- gopcode(OFUNC, n->type, Z, &nod);
- regfree(&nod);
- } else
- gopcode(OFUNC, n->type, Z, l);
- if(REGARG >= 0 && reg[REGARG])
- reg[REGARG]--;
- regret(&nod, n, l->type, 1); // update maxarg if nothing else
- if(nn != Z)
- gmove(&nod, nn);
- if(nod.op == OREGISTER)
- regfree(&nod);
- break;
-
- case OIND:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- regialloc(&nod, n, nn);
- r = l;
- while(r->op == OADD)
- r = r->right;
- if(sconst(r)) {
- v = r->vconst;
- r->vconst = 0;
- cgen(l, &nod);
- nod.xoffset += v;
- r->vconst = v;
- } else
- cgen(l, &nod);
- regind(&nod, n);
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OLO:
- case OLS:
- case OHI:
- case OHS:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- boolgen(n, 1, nn);
- break;
-
- case OANDAND:
- case OOROR:
- boolgen(n, 1, nn);
- if(nn == Z)
- patch(p, pc);
- break;
-
- case ONOT:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- boolgen(n, 1, nn);
- break;
-
- case OCOMMA:
- cgen(l, Z);
- cgen(r, nn);
- break;
-
- case OCAST:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- /*
- * convert from types l->n->nn
- */
- if(nocast(l->type, n->type) && nocast(n->type, nn->type)) {
- /* both null, gen l->nn */
- cgen(l, nn);
- break;
- }
- if(ewidth[n->type->etype] < ewidth[l->type->etype]){
- if(l->type->etype == TIND && typechlp[n->type->etype])
- warn(n, "conversion of pointer to shorter integer");
- }else if(0){
- if(nocast(n->type, nn->type) || castup(n->type, nn->type)){
- if(typefd[l->type->etype] != typefd[nn->type->etype])
- regalloc(&nod, l, nn);
- else
- regalloc(&nod, nn, nn);
- cgen(l, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- regalloc(&nod1, n, &nod);
- gmove(&nod, &nod1);
- gmove(&nod1, nn);
- regfree(&nod1);
- regfree(&nod);
- break;
-
- case ODOT:
- sugen(l, nodrat, l->type->width);
- if(nn == Z)
- break;
- warn(n, "non-interruptable temporary");
- nod = *nodrat;
- if(!r || r->op != OCONST) {
- diag(n, "DOT and no offset");
- break;
- }
- nod.xoffset += (int32)r->vconst;
- nod.type = n->type;
- cgen(&nod, nn);
- break;
-
- case OCOND:
- bcgen(l, 1);
- p1 = p;
- cgen(r->left, nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- cgen(r->right, nn);
- patch(p1, pc);
- break;
-
- case OPOSTINC:
- case OPOSTDEC:
- v = 1;
- if(l->type->etype == TIND)
- v = l->type->link->width;
- if(o == OPOSTDEC)
- v = -v;
- if(l->op == OBIT)
- goto bitinc;
- if(nn == Z)
- goto pre;
-
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
-
- gmove(&nod, nn);
- if(typefd[n->type->etype]) {
- regalloc(&nod1, l, Z);
- gmove(&nod, &nod1);
- if(v < 0)
- gopcode(OSUB, n->type, nodfconst(-v), &nod1);
- else
- gopcode(OADD, n->type, nodfconst(v), &nod1);
- gmove(&nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(OADD, n->type, nodconst(v), &nod);
- if(hardleft)
- regfree(&nod);
- break;
-
- case OPREINC:
- case OPREDEC:
- v = 1;
- if(l->type->etype == TIND)
- v = l->type->link->width;
- if(o == OPREDEC)
- v = -v;
- if(l->op == OBIT)
- goto bitinc;
-
- pre:
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- if(typefd[n->type->etype]) {
- regalloc(&nod1, l, Z);
- gmove(&nod, &nod1);
- if(v < 0)
- gopcode(OSUB, n->type, nodfconst(-v), &nod1);
- else
- gopcode(OADD, n->type, nodfconst(v), &nod1);
- gmove(&nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(OADD, n->type, nodconst(v), &nod);
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod);
- break;
-
- bitinc:
- if(nn != Z && (o == OPOSTINC || o == OPOSTDEC)) {
- bitload(l, &nod, &nod1, &nod2, Z);
- gmove(&nod, nn);
- gopcode(OADD, tfield, nodconst(v), &nod);
- bitstore(l, &nod, &nod1, &nod2, Z);
- break;
- }
- bitload(l, &nod, &nod1, &nod2, nn);
- gopcode(OADD, tfield, nodconst(v), &nod);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
- }
-done:
- cursafe = curs;
-}
-
-void
-reglcgen(Node *t, Node *n, Node *nn)
-{
- Node *r;
- int32 v;
-
- regialloc(t, n, nn);
- if(n->op == OIND) {
- r = n->left;
- while(r->op == OADD)
- r = r->right;
- if(sconst(r)) {
- v = r->vconst;
- r->vconst = 0;
- lcgen(n, t);
- t->xoffset += v;
- r->vconst = v;
- regind(t, n);
- return;
- }
- }
- lcgen(n, t);
- regind(t, n);
-}
-
-void
-lcgen(Node *n, Node *nn)
-{
- Prog *p1;
- Node nod;
-
- if(debug['g']) {
- prtree(nn, "lcgen lhs");
- prtree(n, "lcgen");
- }
- if(n == Z || n->type == T)
- return;
- if(nn == Z) {
- nn = &nod;
- regalloc(&nod, n, Z);
- }
- switch(n->op) {
- default:
- if(n->addable < INDEXED) {
- diag(n, "unknown op in lcgen: %O", n->op);
- break;
- }
- gopcode(OADDR, n->type, n, nn);
- break;
-
- case OCOMMA:
- cgen(n->left, n->left);
- lcgen(n->right, nn);
- break;
-
- case OIND:
- cgen(n->left, nn);
- break;
-
- case OCOND:
- bcgen(n->left, 1);
- p1 = p;
- lcgen(n->right->left, nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- lcgen(n->right->right, nn);
- patch(p1, pc);
- break;
- }
-}
-
-void
-bcgen(Node *n, int true)
-{
-
- if(n->type == T)
- gbranch(OGOTO);
- else
- boolgen(n, true, Z);
-}
-
-void
-boolgen(Node *n, int true, Node *nn)
-{
- int o;
- Prog *p1, *p2, *p3;
- Node *l, *r, nod, nod1;
- int32 curs;
-
- if(debug['g']) {
- print("boolgen %d\n", true);
- prtree(nn, "boolgen lhs");
- prtree(n, "boolgen");
- }
- curs = cursafe;
- l = n->left;
- r = n->right;
- switch(n->op) {
-
- default:
- o = ONE;
- if(true)
- o = OEQ;
- /* bad, 13 is address of external that becomes constant */
- if(n->addable >= INDEXED && n->addable != 13) {
- if(typefd[n->type->etype]) {
- regalloc(&nod1, n, Z);
- gmove(nodfconst(0.0), &nod1); /* TO DO: FREGZERO */
- gopcode(o, n->type, n, &nod1);
- regfree(&nod1);
- } else
- gopcode(o, n->type, n, nodconst(0));
- goto com;
- }
- regalloc(&nod, n, nn);
- cgen(n, &nod);
- if(typefd[n->type->etype]) {
- regalloc(&nod1, n, Z);
- gmove(nodfconst(0.0), &nod1); /* TO DO: FREGZERO */
- gopcode(o, n->type, &nod, &nod1);
- regfree(&nod1);
- } else
- gopcode(o, n->type, &nod, nodconst(0));
- regfree(&nod);
- goto com;
-
- case OCONST:
- o = vconst(n);
- if(!true)
- o = !o;
- gbranch(OGOTO);
- if(o) {
- p1 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- }
- goto com;
-
- case OCOMMA:
- cgen(l, Z);
- boolgen(r, true, nn);
- break;
-
- case ONOT:
- boolgen(l, !true, nn);
- break;
-
- case OCOND:
- bcgen(l, 1);
- p1 = p;
- bcgen(r->left, true);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- bcgen(r->right, !true);
- patch(p2, pc);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- patch(p2, pc);
- goto com;
-
- case OANDAND:
- if(!true)
- goto caseor;
-
- caseand:
- bcgen(l, true);
- p1 = p;
- bcgen(r, !true);
- p2 = p;
- patch(p1, pc);
- gbranch(OGOTO);
- patch(p2, pc);
- goto com;
-
- case OOROR:
- if(!true)
- goto caseand;
-
- caseor:
- bcgen(l, !true);
- p1 = p;
- bcgen(r, !true);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- patch(p2, pc);
- goto com;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- o = n->op;
- if(true && typefd[l->type->etype] && (o == OEQ || o == ONE)) {
- // Cannot rewrite !(l == r) into l != r with float64; it breaks NaNs.
- // Jump around instead.
- boolgen(n, 0, Z);
- p1 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- goto com;
- }
- if(true)
- o = comrel[relindex(o)];
- if(l->complex >= FNX && r->complex >= FNX) {
- regret(&nod, r, 0, 0);
- cgen(r, &nod);
- regsalloc(&nod1, r);
- gmove(&nod, &nod1);
- regfree(&nod);
- nod = *n;
- nod.right = &nod1;
- boolgen(&nod, true, nn);
- break;
- }
- if(immconst(l)) {
- // NOTE: Reversing the comparison here is wrong
- // for floating point ordering comparisons involving NaN,
- // but we don't have any of those yet so we don't
- // bother worrying about it.
- o = invrel[relindex(o)];
- /* bad, 13 is address of external that becomes constant */
- if(r->addable < INDEXED || r->addable == 13) {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- gopcode(o, l->type, &nod, l);
- regfree(&nod);
- } else
- gopcode(o, l->type, r, l);
- goto com;
- }
- if(typefd[l->type->etype])
- o = invrel[relindex(logrel[relindex(o)])];
- if(l->complex >= r->complex) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(r->addable < INDEXED || hardconst(r) || typefd[l->type->etype]) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(o, l->type, &nod, &nod1);
- regfree(&nod1);
- } else {
- gopcode(o, l->type, &nod, r);
- }
- regfree(&nod);
- goto fixfloat;
- }
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- if(l->addable < INDEXED || l->addable == 13 || hardconst(l)) {
- regalloc(&nod1, l, Z);
- cgen(l, &nod1);
- if(typechl[l->type->etype] && ewidth[l->type->etype] <= ewidth[TINT])
- gopcode(o, types[TINT], &nod1, &nod);
- else
- gopcode(o, l->type, &nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(o, l->type, l, &nod);
- regfree(&nod);
- fixfloat:
- if(typefd[l->type->etype]) {
- switch(o) {
- case OEQ:
- // Already emitted AJEQ; want AJEQ and AJPC.
- p1 = p;
- gbranch(OGOTO);
- p2 = p;
- patch(p1, pc);
- gins(AJPC, Z, Z);
- patch(p2, pc);
- break;
-
- case ONE:
- // Already emitted AJNE; want AJNE or AJPS.
- p1 = p;
- gins(AJPS, Z, Z);
- p2 = p;
- gbranch(OGOTO);
- p3 = p;
- patch(p1, pc);
- patch(p2, pc);
- gbranch(OGOTO);
- patch(p3, pc);
- break;
- }
- }
-
- com:
- if(nn != Z) {
- p1 = p;
- gmove(nodconst(1L), nn);
- gbranch(OGOTO);
- p2 = p;
- patch(p1, pc);
- gmove(nodconst(0L), nn);
- patch(p2, pc);
- }
- break;
- }
- cursafe = curs;
-}
-
-void
-sugen(Node *n, Node *nn, int32 w)
-{
- Prog *p1;
- Node nod0, nod1, nod2, nod3, nod4, *l, *r;
- Type *t;
- int c, mt, mo;
- vlong o0, o1;
-
- if(n == Z || n->type == T)
- return;
- if(debug['g']) {
- prtree(nn, "sugen lhs");
- prtree(n, "sugen");
- }
- if(nn == nodrat)
- if(w > nrathole)
- nrathole = w;
- switch(n->op) {
- case OIND:
- if(nn == Z) {
- nullwarn(n->left, Z);
- break;
- }
-
- default:
- goto copy;
-
- case OCONST:
- goto copy;
-
- case ODOT:
- l = n->left;
- sugen(l, nodrat, l->type->width);
- if(nn == Z)
- break;
- warn(n, "non-interruptable temporary");
- nod1 = *nodrat;
- r = n->right;
- if(!r || r->op != OCONST) {
- diag(n, "DOT and no offset");
- break;
- }
- nod1.xoffset += (int32)r->vconst;
- nod1.type = n->type;
- sugen(&nod1, nn, w);
- break;
-
- case OSTRUCT:
- /*
- * rewrite so lhs has no fn call
- */
- if(nn != Z && side(nn)) {
- nod1 = *n;
- nod1.type = typ(TIND, n->type);
- regret(&nod2, &nod1, 0, 0);
- lcgen(nn, &nod2);
- regsalloc(&nod0, &nod1);
- cgen(&nod2, &nod0);
- regfree(&nod2);
-
- nod1 = *n;
- nod1.op = OIND;
- nod1.left = &nod0;
- nod1.right = Z;
- nod1.complex = 1;
-
- sugen(n, &nod1, w);
- return;
- }
-
- r = n->left;
- for(t = n->type->link; t != T; t = t->down) {
- l = r;
- if(r->op == OLIST) {
- l = r->left;
- r = r->right;
- }
- if(nn == Z) {
- cgen(l, nn);
- continue;
- }
- /*
- * hand craft *(&nn + o) = l
- */
- nod0 = znode;
- nod0.op = OAS;
- nod0.type = t;
- nod0.left = &nod1;
- nod0.right = nil;
-
- nod1 = znode;
- nod1.op = OIND;
- nod1.type = t;
- nod1.left = &nod2;
-
- nod2 = znode;
- nod2.op = OADD;
- nod2.type = typ(TIND, t);
- nod2.left = &nod3;
- nod2.right = &nod4;
-
- nod3 = znode;
- nod3.op = OADDR;
- nod3.type = nod2.type;
- nod3.left = nn;
-
- nod4 = znode;
- nod4.op = OCONST;
- nod4.type = nod2.type;
- nod4.vconst = t->offset;
-
- ccom(&nod0);
- acom(&nod0);
- xcom(&nod0);
- nod0.addable = 0;
- nod0.right = l;
-
- // prtree(&nod0, "hand craft");
- cgen(&nod0, Z);
- }
- break;
-
- case OAS:
- if(nn == Z) {
- if(n->addable < INDEXED)
- sugen(n->right, n->left, w);
- break;
- }
-
- sugen(n->right, nodrat, w);
- warn(n, "non-interruptable temporary");
- sugen(nodrat, n->left, w);
- sugen(nodrat, nn, w);
- break;
-
- case OFUNC:
- if(!hasdotdotdot(n->left->type)) {
- cgen(n, Z);
- if(nn != Z) {
- curarg -= n->type->width;
- regret(&nod1, n, n->left->type, 1);
- if(nn->complex >= FNX) {
- regsalloc(&nod2, n);
- cgen(&nod1, &nod2);
- nod1 = nod2;
- }
- cgen(&nod1, nn);
- }
- break;
- }
- if(nn == Z) {
- sugen(n, nodrat, w);
- break;
- }
- if(nn->op != OIND) {
- nn = new1(OADDR, nn, Z);
- nn->type = types[TIND];
- nn->addable = 0;
- } else
- nn = nn->left;
- n = new(OFUNC, n->left, new(OLIST, nn, n->right));
- n->type = types[TVOID];
- n->left->type = types[TVOID];
- cgen(n, Z);
- break;
-
- case OCOND:
- bcgen(n->left, 1);
- p1 = p;
- sugen(n->right->left, nn, w);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- sugen(n->right->right, nn, w);
- patch(p1, pc);
- break;
-
- case OCOMMA:
- cgen(n->left, Z);
- sugen(n->right, nn, w);
- break;
- }
- return;
-
-copy:
- if(nn == Z) {
- switch(n->op) {
- case OASADD:
- case OASSUB:
- case OASAND:
- case OASOR:
- case OASXOR:
-
- case OASMUL:
- case OASLMUL:
-
-
- case OASASHL:
- case OASASHR:
- case OASLSHR:
- break;
-
- case OPOSTINC:
- case OPOSTDEC:
- case OPREINC:
- case OPREDEC:
- break;
-
- default:
- return;
- }
- }
-
- if(n->complex >= FNX && nn != nil && nn->complex >= FNX) {
- t = nn->type;
- nn->type = types[TIND];
- regialloc(&nod1, nn, Z);
- lcgen(nn, &nod1);
- regsalloc(&nod2, nn);
- nn->type = t;
-
- gins(AMOVQ, &nod1, &nod2);
- regfree(&nod1);
-
- nod2.type = typ(TIND, t);
-
- nod1 = nod2;
- nod1.op = OIND;
- nod1.left = &nod2;
- nod1.right = Z;
- nod1.complex = 1;
- nod1.type = t;
-
- sugen(n, &nod1, w);
- return;
- }
-
- if(w <= 32) {
- c = cursafe;
- if(n->left != Z && n->left->complex >= FNX
- && n->right != Z && n->right->complex >= FNX) {
- regsalloc(&nod1, n->right);
- cgen(n->right, &nod1);
- nod2 = *n;
- nod2.right = &nod1;
- cgen(&nod2, nn);
- cursafe = c;
- return;
- }
- if(w & 7) {
- mt = TLONG;
- mo = AMOVL;
- } else {
- mt = TVLONG;
- mo = AMOVQ;
- }
- if(n->complex > nn->complex) {
- t = n->type;
- n->type = types[mt];
- regalloc(&nod0, n, Z);
- if(!vaddr(n, 0)) {
- reglcgen(&nod1, n, Z);
- n->type = t;
- n = &nod1;
- }
- else
- n->type = t;
-
- t = nn->type;
- nn->type = types[mt];
- if(!vaddr(nn, 0)) {
- reglcgen(&nod2, nn, Z);
- nn->type = t;
- nn = &nod2;
- }
- else
- nn->type = t;
- } else {
- t = nn->type;
- nn->type = types[mt];
- regalloc(&nod0, nn, Z);
- if(!vaddr(nn, 0)) {
- reglcgen(&nod2, nn, Z);
- nn->type = t;
- nn = &nod2;
- }
- else
- nn->type = t;
-
- t = n->type;
- n->type = types[mt];
- if(!vaddr(n, 0)) {
- reglcgen(&nod1, n, Z);
- n->type = t;
- n = &nod1;
- }
- else
- n->type = t;
- }
- o0 = n->xoffset;
- o1 = nn->xoffset;
- w /= ewidth[mt];
- while(--w >= 0) {
- gins(mo, n, &nod0);
- gins(mo, &nod0, nn);
- n->xoffset += ewidth[mt];
- nn->xoffset += ewidth[mt];
- }
- n->xoffset = o0;
- nn->xoffset = o1;
- if(nn == &nod2)
- regfree(&nod2);
- if(n == &nod1)
- regfree(&nod1);
- regfree(&nod0);
- return;
- }
-
- /* botch, need to save in .safe */
- c = 0;
- if(n->complex > nn->complex) {
- t = n->type;
- n->type = types[TIND];
- nodreg(&nod1, n, D_SI);
- if(reg[D_SI]) {
- gins(APUSHQ, &nod1, Z);
- c |= 1;
- reg[D_SI]++;
- }
- lcgen(n, &nod1);
- n->type = t;
-
- t = nn->type;
- nn->type = types[TIND];
- nodreg(&nod2, nn, D_DI);
- if(reg[D_DI]) {
-warn(Z, "DI botch");
- gins(APUSHQ, &nod2, Z);
- c |= 2;
- reg[D_DI]++;
- }
- lcgen(nn, &nod2);
- nn->type = t;
- } else {
- t = nn->type;
- nn->type = types[TIND];
- nodreg(&nod2, nn, D_DI);
- if(reg[D_DI]) {
-warn(Z, "DI botch");
- gins(APUSHQ, &nod2, Z);
- c |= 2;
- reg[D_DI]++;
- }
- lcgen(nn, &nod2);
- nn->type = t;
-
- t = n->type;
- n->type = types[TIND];
- nodreg(&nod1, n, D_SI);
- if(reg[D_SI]) {
- gins(APUSHQ, &nod1, Z);
- c |= 1;
- reg[D_SI]++;
- }
- lcgen(n, &nod1);
- n->type = t;
- }
- nodreg(&nod3, n, D_CX);
- if(reg[D_CX]) {
- gins(APUSHQ, &nod3, Z);
- c |= 4;
- reg[D_CX]++;
- }
- gins(AMOVL, nodconst(w/SZ_INT), &nod3);
- gins(ACLD, Z, Z);
- gins(AREP, Z, Z);
- gins(AMOVSL, Z, Z);
- if(c & 4) {
- gins(APOPQ, Z, &nod3);
- reg[D_CX]--;
- }
- if(c & 2) {
- gins(APOPQ, Z, &nod2);
- reg[nod2.reg]--;
- }
- if(c & 1) {
- gins(APOPQ, Z, &nod1);
- reg[nod1.reg]--;
- }
-}
-
-/*
- * TO DO
- */
-void
-layout(Node *f, Node *t, int c, int cv, Node *cn)
-{
- Node t1, t2;
-
- while(c > 3) {
- layout(f, t, 2, 0, Z);
- c -= 2;
- }
-
- regalloc(&t1, &lregnode, Z);
- regalloc(&t2, &lregnode, Z);
- if(c > 0) {
- gmove(f, &t1);
- f->xoffset += SZ_INT;
- }
- if(cn != Z)
- gmove(nodconst(cv), cn);
- if(c > 1) {
- gmove(f, &t2);
- f->xoffset += SZ_INT;
- }
- if(c > 0) {
- gmove(&t1, t);
- t->xoffset += SZ_INT;
- }
- if(c > 2) {
- gmove(f, &t1);
- f->xoffset += SZ_INT;
- }
- if(c > 1) {
- gmove(&t2, t);
- t->xoffset += SZ_INT;
- }
- if(c > 2) {
- gmove(&t1, t);
- t->xoffset += SZ_INT;
- }
- regfree(&t1);
- regfree(&t2);
-}
-
-/*
- * constant is not vlong or fits as 32-bit signed immediate
- */
-int
-immconst(Node *n)
-{
- int32 v;
-
- if(n->op != OCONST || !typechlpv[n->type->etype])
- return 0;
- if(typechl[n->type->etype])
- return 1;
- v = n->vconst;
- return n->vconst == (vlong)v;
-}
-
-/*
- * if a constant and vlong, doesn't fit as 32-bit signed immediate
- */
-int
-hardconst(Node *n)
-{
- return n->op == OCONST && !immconst(n);
-}
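As a side note on the pair above: amd64 instructions can encode at most a sign-extended 32-bit immediate, which is exactly what immconst tests with its int32 round-trip and what hardconst negates. A minimal standalone sketch of the same test, assuming nothing from the compiler's Node machinery (fits_imm32 is a hypothetical name, not part of the deleted source):

	#include <stdint.h>

	/* 1 if v can be encoded as a sign-extended 32-bit immediate,
	   0 if it has to be materialized in a register first */
	static int
	fits_imm32(int64_t v)
	{
		return v == (int64_t)(int32_t)v;
	}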
-
-/*
- * casting up to t2 covers an intermediate cast to t1
- */
-int
-castup(Type *t1, Type *t2)
-{
- int ft;
-
- if(!nilcast(t1, t2))
- return 0;
- /* known to be small to large */
- ft = t1->etype;
- switch(t2->etype){
- case TINT:
- case TLONG:
- return ft == TLONG || ft == TINT || ft == TSHORT || ft == TCHAR;
- case TUINT:
- case TULONG:
- return ft == TULONG || ft == TUINT || ft == TUSHORT || ft == TUCHAR;
- case TVLONG:
- return ft == TLONG || ft == TINT || ft == TSHORT;
- case TUVLONG:
- return ft == TULONG || ft == TUINT || ft == TUSHORT;
- }
- return 0;
-}
-
-void
-zeroregm(Node *n)
-{
- gins(AMOVL, nodconst(0), n);
-}
-
-/* do we need to load the address of a vlong? */
-int
-vaddr(Node *n, int a)
-{
- switch(n->op) {
- case ONAME:
- if(a)
- return 1;
- return !(n->class == CEXTERN || n->class == CGLOBL || n->class == CSTATIC);
-
- case OCONST:
- case OREGISTER:
- case OINDREG:
- return 1;
- }
- return 0;
-}
-
-int32
-hi64v(Node *n)
-{
- if(align(0, types[TCHAR], Aarg1, nil)) /* isbigendian */
- return (int32)(n->vconst) & ~0L;
- else
- return (int32)((uvlong)n->vconst>>32) & ~0L;
-}
-
-int32
-lo64v(Node *n)
-{
- if(align(0, types[TCHAR], Aarg1, nil)) /* isbigendian */
- return (int32)((uvlong)n->vconst>>32) & ~0L;
- else
- return (int32)(n->vconst) & ~0L;
-}
-
-Node *
-hi64(Node *n)
-{
- return nodconst(hi64v(n));
-}
-
-Node *
-lo64(Node *n)
-{
- return nodconst(lo64v(n));
-}
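hi64v/lo64v above split a 64-bit constant into 32-bit halves, with the align(...) call serving as an is-big-endian probe that decides which half is which. A hedged sketch of the underlying split, independent of endianness and of the Node type (split64 is a hypothetical helper):

	#include <stdint.h>

	/* low and high 32-bit halves of a 64-bit constant; byte order
	   only affects which half the caller treats as "first" */
	static void
	split64(int64_t v, uint32_t *lo, uint32_t *hi)
	{
		*lo = (uint32_t)v;
		*hi = (uint32_t)((uint64_t)v >> 32);
	}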
-
-int
-cond(int op)
-{
- switch(op) {
- case OANDAND:
- case OOROR:
- case ONOT:
- return 1;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- return 1;
- }
- return 0;
-}
diff --git a/src/cmd/6c/div.c b/src/cmd/6c/div.c
deleted file mode 100644
index bad6c5e27..000000000
--- a/src/cmd/6c/div.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// Inferno utils/6c/div.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/div.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-/*
- * Based on: Granlund, T.; Montgomery, P.L.
- * "Division by Invariant Integers using Multiplication".
- * SIGPLAN Notices, Vol. 29, June 1994, page 61.
- */
-
-#define TN(n) ((uvlong)1 << (n))
-#define T31 TN(31)
-#define T32 TN(32)
-
-int
-multiplier(uint32 d, int p, uvlong *mp)
-{
- int l;
- uvlong mlo, mhi, tlo, thi;
-
- l = topbit(d - 1) + 1;
- mlo = (((TN(l) - d) << 32) / d) + T32;
- if(l + p == 64)
- mhi = (((TN(l) + 1 - d) << 32) / d) + T32;
- else
- mhi = (TN(32 + l) + TN(32 + l - p)) / d;
- /*assert(mlo < mhi);*/
- while(l > 0) {
- tlo = mlo >> 1;
- thi = mhi >> 1;
- if(tlo == thi)
- break;
- mlo = tlo;
- mhi = thi;
- l--;
- }
- *mp = mhi;
- return l;
-}
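multiplier() above computes the Granlund-Montgomery magic constant: dividing by a fixed d becomes a widening multiply by m followed by a right shift. A small check of that identity for the easy case where the returned multiplier fits in 32 bits (udivgen below handles the >= 2^32 case with an extra add/rotate, which this sketch deliberately ignores; magic_ok is a hypothetical name):

	#include <stdint.h>

	/* verify x/d == high32(x*m) >> s for one x, assuming m < 2^32 so
	   the 32x32 product fits in a uint64_t */
	static int
	magic_ok(uint32_t x, uint32_t d, uint32_t m, int s)
	{
		uint64_t q = ((uint64_t)x * m) >> (32 + s);
		return q == x / d;
	}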
-
-int
-sdiv(uint32 d, uint32 *mp, int *sp)
-{
- int s;
- uvlong m;
-
- s = multiplier(d, 32 - 1, &m);
- *mp = m;
- *sp = s;
- if(m >= T31)
- return 1;
- else
- return 0;
-}
-
-int
-udiv(uint32 d, uint32 *mp, int *sp, int *pp)
-{
- int p, s;
- uvlong m;
-
- s = multiplier(d, 32, &m);
- p = 0;
- if(m >= T32) {
- while((d & 1) == 0) {
- d >>= 1;
- p++;
- }
- s = multiplier(d, 32 - p, &m);
- }
- *mp = m;
- *pp = p;
- if(m >= T32) {
- /*assert(p == 0);*/
- *sp = s - 1;
- return 1;
- }
- else {
- *sp = s;
- return 0;
- }
-}
-
-void
-sdivgen(Node *l, Node *r, Node *ax, Node *dx)
-{
- int a, s;
- uint32 m;
- vlong c;
-
- c = r->vconst;
- if(c < 0)
- c = -c;
- a = sdiv(c, &m, &s);
-//print("a=%d i=%d s=%d m=%ux\n", a, (long)r->vconst, s, m);
- gins(AMOVL, nodconst(m), ax);
- gins(AIMULL, l, Z);
- gins(AMOVL, l, ax);
- if(a)
- gins(AADDL, ax, dx);
- gins(ASHRL, nodconst(31), ax);
- gins(ASARL, nodconst(s), dx);
- gins(AADDL, ax, dx);
- if(r->vconst < 0)
- gins(ANEGL, Z, dx);
-}
-
-void
-udivgen(Node *l, Node *r, Node *ax, Node *dx)
-{
- int a, s, t;
- uint32 m;
- Node nod;
-
- a = udiv(r->vconst, &m, &s, &t);
-//print("a=%ud i=%d p=%d s=%d m=%ux\n", a, (long)r->vconst, t, s, m);
- if(t != 0) {
- gins(AMOVL, l, ax);
- gins(ASHRL, nodconst(t), ax);
- gins(AMOVL, nodconst(m), dx);
- gins(AMULL, dx, Z);
- }
- else if(a) {
- if(l->op != OREGISTER) {
- regalloc(&nod, l, Z);
- gins(AMOVL, l, &nod);
- l = &nod;
- }
- gins(AMOVL, nodconst(m), ax);
- gins(AMULL, l, Z);
- gins(AADDL, l, dx);
- gins(ARCRL, nodconst(1), dx);
- if(l == &nod)
- regfree(l);
- }
- else {
- gins(AMOVL, nodconst(m), ax);
- gins(AMULL, l, Z);
- }
- if(s != 0)
- gins(ASHRL, nodconst(s), dx);
-}
-
-void
-sext(Node *d, Node *s, Node *l)
-{
- if(s->reg == D_AX && !nodreg(d, Z, D_DX)) {
- reg[D_DX]++;
- gins(ACDQ, Z, Z);
- }
- else {
- regalloc(d, l, Z);
- gins(AMOVL, s, d);
- gins(ASARL, nodconst(31), d);
- }
-}
-
-void
-sdiv2(int32 c, int v, Node *l, Node *n)
-{
- Node nod;
-
- if(v > 0) {
- if(v > 1) {
- sext(&nod, n, l);
- gins(AANDL, nodconst((1 << v) - 1), &nod);
- gins(AADDL, &nod, n);
- regfree(&nod);
- }
- else {
- gins(ACMPL, n, nodconst(0x80000000));
- gins(ASBBL, nodconst(-1), n);
- }
- gins(ASARL, nodconst(v), n);
- }
- if(c < 0)
- gins(ANEGL, Z, n);
-}
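sdiv2 above divides by ±2^v without a DIV: for v > 1 it sign-extends, masks off the low v bits and adds them back as a bias before the arithmetic shift, so the result rounds toward zero rather than toward minus infinity. The same bias trick in C (assumes arithmetic right shift of negative values, as on amd64; div_pow2 is a hypothetical name):

	#include <stdint.h>

	/* signed divide by 2^v (1 <= v <= 31), rounding toward zero */
	static int32_t
	div_pow2(int32_t n, int v)
	{
		int32_t mask = (int32_t)((1u << v) - 1);
		int32_t bias = (n >> 31) & mask;	/* bias only if n < 0 */
		return (n + bias) >> v;
	}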
-
-void
-smod2(int32 c, int v, Node *l, Node *n)
-{
- Node nod;
-
- if(c == 1) {
- zeroregm(n);
- return;
- }
-
- sext(&nod, n, l);
- if(v == 0) {
- zeroregm(n);
- gins(AXORL, &nod, n);
- gins(ASUBL, &nod, n);
- }
- else if(v > 1) {
- gins(AANDL, nodconst((1 << v) - 1), &nod);
- gins(AADDL, &nod, n);
- gins(AANDL, nodconst((1 << v) - 1), n);
- gins(ASUBL, &nod, n);
- }
- else {
- gins(AANDL, nodconst(1), n);
- gins(AXORL, &nod, n);
- gins(ASUBL, &nod, n);
- }
- regfree(&nod);
-}
diff --git a/src/cmd/6c/doc.go b/src/cmd/6c/doc.go
deleted file mode 100644
index e0a22e78b..000000000
--- a/src/cmd/6c/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-
-6c is a version of the Plan 9 C compiler. The original is documented at
-
- http://plan9.bell-labs.com/magic/man2html/1/8c
-
-Its target architecture is the x86-64, referred to by these tools as amd64.
-
-*/
-package main
diff --git a/src/cmd/6c/gc.h b/src/cmd/6c/gc.h
deleted file mode 100644
index aa9d95d21..000000000
--- a/src/cmd/6c/gc.h
+++ /dev/null
@@ -1,359 +0,0 @@
-// Inferno utils/6c/gc.h
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/gc.h
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "../cc/cc.h"
-#include "../6l/6.out.h"
-
-/*
- * 6c/amd64
- * Intel 386 with AMD64 extensions
- */
-#define SZ_CHAR 1
-#define SZ_SHORT 2
-#define SZ_INT 4
-#define SZ_LONG 4
-#define SZ_IND 8
-#define SZ_FLOAT 4
-#define SZ_VLONG 8
-#define SZ_DOUBLE 8
-#define FNX 100
-
-typedef struct Case Case;
-typedef struct C1 C1;
-typedef struct Reg Reg;
-typedef struct Rgn Rgn;
-typedef struct Renv Renv;
-
-EXTERN struct
-{
- Node* regtree;
- Node* basetree;
- short scale;
- short reg;
- short ptr;
-} idx;
-
-#define INDEXED 9
-
-#define A ((Addr*)0)
-#define P ((Prog*)0)
-
-struct Case
-{
- Case* link;
- vlong val;
- int32 label;
- char def;
- char isv;
-};
-#define C ((Case*)0)
-
-struct C1
-{
- vlong val;
- int32 label;
-};
-
-struct Reg
-{
- int32 pc;
- int32 rpo; /* reverse post ordering */
-
- Bits set;
- Bits use1;
- Bits use2;
-
- Bits refbehind;
- Bits refahead;
- Bits calbehind;
- Bits calahead;
- Bits regdiff;
- Bits act;
-
- int32 regu;
- int32 loop; /* could be shorter */
-
- Reg* log5;
- int32 active;
-
- Reg* p1;
- Reg* p2;
- Reg* p2link;
- Reg* s1;
- Reg* s2;
- Reg* link;
- Prog* prog;
-};
-#define R ((Reg*)0)
-
-struct Renv
-{
- int safe;
- Node base;
- Node* saved;
- Node* scope;
-};
-
-#define NRGN 600
-struct Rgn
-{
- Reg* enter;
- short cost;
- short varno;
- short regno;
-};
-
-EXTERN int32 breakpc;
-EXTERN int32 nbreak;
-EXTERN Case* cases;
-EXTERN Node constnode;
-EXTERN Node fconstnode;
-EXTERN Node vconstnode;
-EXTERN int32 continpc;
-EXTERN int32 curarg;
-EXTERN int32 cursafe;
-EXTERN Prog* lastp;
-EXTERN int32 maxargsafe;
-EXTERN int mnstring;
-EXTERN Node* nodrat;
-EXTERN Node* nodret;
-EXTERN Node* nodsafe;
-EXTERN int32 nrathole;
-EXTERN int32 nstring;
-EXTERN Prog* p;
-EXTERN int32 pc;
-EXTERN Node lregnode;
-EXTERN Node qregnode;
-EXTERN char string[NSNAME];
-EXTERN Sym* symrathole;
-EXTERN Node znode;
-EXTERN Prog zprog;
-EXTERN int reg[D_NONE];
-EXTERN int32 exregoffset;
-EXTERN int32 exfregoffset;
-EXTERN uchar typechlpv[NTYPE];
-
-#define BLOAD(r) band(bnot(r->refbehind), r->refahead)
-#define BSTORE(r) band(bnot(r->calbehind), r->calahead)
-#define LOAD(r) (~r->refbehind.b[z] & r->refahead.b[z])
-#define STORE(r) (~r->calbehind.b[z] & r->calahead.b[z])
-
-#define bset(a,n) ((a).b[(n)/32]&(1L<<(n)%32))
-
-#define CLOAD 5
-#define CREF 5
-#define CINF 1000
-#define LOOP 3
-
-EXTERN Rgn region[NRGN];
-EXTERN Rgn* rgp;
-EXTERN int nregion;
-EXTERN int nvar;
-
-EXTERN Bits externs;
-EXTERN Bits params;
-EXTERN Bits consts;
-EXTERN Bits addrs;
-
-EXTERN int32 regbits;
-EXTERN int32 exregbits;
-
-EXTERN int change;
-EXTERN int suppress;
-
-EXTERN Reg* firstr;
-EXTERN Reg* lastr;
-EXTERN Reg zreg;
-EXTERN Reg* freer;
-EXTERN int32* idom;
-EXTERN Reg** rpo2r;
-EXTERN int32 maxnr;
-
-extern char* anames[];
-
-/*
- * sgen.c
- */
-void codgen(Node*, Node*);
-void gen(Node*);
-void noretval(int);
-void usedset(Node*, int);
-void xcom(Node*);
-void indx(Node*);
-int bcomplex(Node*, Node*);
-Prog* gtext(Sym*, int32);
-vlong argsize(int);
-
-/*
- * cgen.c
- */
-void zeroregm(Node*);
-void cgen(Node*, Node*);
-void reglcgen(Node*, Node*, Node*);
-void lcgen(Node*, Node*);
-void bcgen(Node*, int);
-void boolgen(Node*, int, Node*);
-void sugen(Node*, Node*, int32);
-int needreg(Node*, int);
-int hardconst(Node*);
-int immconst(Node*);
-
-/*
- * txt.c
- */
-void ginit(void);
-void gclean(void);
-void nextpc(void);
-void gargs(Node*, Node*, Node*);
-void garg1(Node*, Node*, Node*, int, Node**);
-Node* nodconst(int32);
-Node* nodfconst(double);
-Node* nodgconst(vlong, Type*);
-int nodreg(Node*, Node*, int);
-int isreg(Node*, int);
-void regret(Node*, Node*, Type*, int);
-void regalloc(Node*, Node*, Node*);
-void regfree(Node*);
-void regialloc(Node*, Node*, Node*);
-void regsalloc(Node*, Node*);
-void regaalloc1(Node*, Node*);
-void regaalloc(Node*, Node*);
-void regind(Node*, Node*);
-void gprep(Node*, Node*);
-void naddr(Node*, Addr*);
-void gcmp(int, Node*, vlong);
-void gmove(Node*, Node*);
-void gins(int a, Node*, Node*);
-void gopcode(int, Type*, Node*, Node*);
-int samaddr(Node*, Node*);
-void gbranch(int);
-void patch(Prog*, int32);
-int sconst(Node*);
-void gpseudo(int, Sym*, Node*);
-void gprefetch(Node*);
-void gpcdata(int, int);
-
-/*
- * swt.c
- */
-int swcmp(const void*, const void*);
-void doswit(Node*);
-void swit1(C1*, int, int32, Node*);
-void swit2(C1*, int, int32, Node*);
-void newcase(void);
-void bitload(Node*, Node*, Node*, Node*, Node*);
-void bitstore(Node*, Node*, Node*, Node*, Node*);
-int32 outstring(char*, int32);
-void nullwarn(Node*, Node*);
-void sextern(Sym*, Node*, int32, int32);
-void gextern(Sym*, Node*, int32, int32);
-void outcode(void);
-
-/*
- * list
- */
-void listinit(void);
-
-/*
- * reg.c
- */
-Reg* rega(void);
-int rcmp(const void*, const void*);
-void regopt(Prog*);
-void addmove(Reg*, int, int, int);
-Bits mkvar(Reg*, Addr*);
-void prop(Reg*, Bits, Bits);
-void loopit(Reg*, int32);
-void synch(Reg*, Bits);
-uint32 allreg(uint32, Rgn*);
-void paint1(Reg*, int);
-uint32 paint2(Reg*, int);
-void paint3(Reg*, int, int32, int);
-void addreg(Addr*, int);
-
-/*
- * peep.c
- */
-void peep(void);
-void excise(Reg*);
-Reg* uniqp(Reg*);
-Reg* uniqs(Reg*);
-int regtyp(Addr*);
-int anyvar(Addr*);
-int subprop(Reg*);
-int copyprop(Reg*);
-int copy1(Addr*, Addr*, Reg*, int);
-int copyu(Prog*, Addr*, Addr*);
-
-int copyas(Addr*, Addr*);
-int copyau(Addr*, Addr*);
-int copysub(Addr*, Addr*, Addr*, int);
-int copysub1(Prog*, Addr*, Addr*, int);
-
-int32 RtoB(int);
-int32 FtoB(int);
-int BtoR(int32);
-int BtoF(int32);
-
-#define D_HI D_NONE
-#define D_LO D_NONE
-
-/*
- * bound
- */
-void comtarg(void);
-
-/*
- * com64
- */
-int cond(int);
-int com64(Node*);
-void com64init(void);
-void bool64(Node*);
-int32 lo64v(Node*);
-int32 hi64v(Node*);
-Node* lo64(Node*);
-Node* hi64(Node*);
-
-/*
- * div/mul
- */
-void sdivgen(Node*, Node*, Node*, Node*);
-void udivgen(Node*, Node*, Node*, Node*);
-void sdiv2(int32, int, Node*, Node*);
-void smod2(int32, int, Node*, Node*);
-void mulgen(Type*, Node*, Node*);
-void genmuladd(Node*, Node*, int, Node*);
-void shiftit(Type*, Node*, Node*);
-
-#define D_X7 (D_X0+7)
-
-void fgopcode(int, Node*, Node*, int, int);
diff --git a/src/cmd/6c/list.c b/src/cmd/6c/list.c
deleted file mode 100644
index 28f5b8df7..000000000
--- a/src/cmd/6c/list.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// Inferno utils/6c/list.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/list.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#define EXTERN
-#include "gc.h"
-
-void
-listinit(void)
-{
- listinit6();
-}
diff --git a/src/cmd/6c/machcap.c b/src/cmd/6c/machcap.c
deleted file mode 100644
index 820d9a0aa..000000000
--- a/src/cmd/6c/machcap.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// Inferno utils/6c/machcap.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/machcap.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-int
-machcap(Node *n)
-{
-
- if(n == Z)
- return 1; /* test */
-
- switch(n->op) {
- case OMUL:
- case OLMUL:
- case OASMUL:
- case OASLMUL:
- if(typechl[n->type->etype])
- return 1;
- if(typev[n->type->etype])
- return 1;
- break;
-
- case OCOM:
- case ONEG:
- case OADD:
- case OAND:
- case OOR:
- case OSUB:
- case OXOR:
- case OASHL:
- case OLSHR:
- case OASHR:
- if(typechlv[n->left->type->etype])
- return 1;
- break;
-
- case OCAST:
- return 1;
-
- case OCOND:
- case OCOMMA:
- case OLIST:
- case OANDAND:
- case OOROR:
- case ONOT:
- return 1;
-
- case OASADD:
- case OASSUB:
- case OASAND:
- case OASOR:
- case OASXOR:
- return 1;
-
- case OASASHL:
- case OASASHR:
- case OASLSHR:
- return 1;
-
- case OPOSTINC:
- case OPOSTDEC:
- case OPREINC:
- case OPREDEC:
- return 1;
-
- case OEQ:
- case ONE:
- case OLE:
- case OGT:
- case OLT:
- case OGE:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- return 1;
- }
- return 0;
-}
diff --git a/src/cmd/6c/mul.c b/src/cmd/6c/mul.c
deleted file mode 100644
index 510edc05c..000000000
--- a/src/cmd/6c/mul.c
+++ /dev/null
@@ -1,458 +0,0 @@
-// Inferno utils/6c/mul.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/mul.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-typedef struct Malg Malg;
-typedef struct Mparam Mparam;
-
-struct Malg
-{
- schar vals[10];
-};
-
-struct Mparam
-{
- uint32 value;
- schar alg;
- char neg;
- char shift;
- char arg;
- schar off;
-};
-
-static Mparam multab[32];
-static int mulptr;
-
-static Malg malgs[] =
-{
- {0, 100},
- {-1, 1, 100},
- {-9, -5, -3, 3, 5, 9, 100},
- {6, 10, 12, 18, 20, 24, 36, 40, 72, 100},
- {-8, -4, -2, 2, 4, 8, 100},
-};
-
-/*
- * return position of lowest 1
- */
-int
-lowbit(uint32 v)
-{
- int s, i;
- uint32 m;
-
- s = 0;
- m = 0xFFFFFFFFUL;
- for(i = 16; i > 0; i >>= 1) {
- m >>= i;
- if((v & m) == 0) {
- v >>= i;
- s += i;
- }
- }
- return s;
-}
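lowbit() above locates the least-significant set bit by halving the mask; mulparam uses it to strip trailing zero bits off the constant before its table search. A trivial reference loop for the same result (only valid for nonzero v, which is all mulparam ever passes; lowbit_ref is a hypothetical name):

	#include <stdint.h>

	/* index of the lowest set bit; v must be nonzero */
	static int
	lowbit_ref(uint32_t v)
	{
		int s = 0;
		while((v & 1) == 0) {
			v >>= 1;
			s++;
		}
		return s;
	}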
-
-void
-genmuladd(Node *d, Node *s, int m, Node *a)
-{
- Node nod;
-
- nod.op = OINDEX;
- nod.left = a;
- nod.right = s;
- nod.scale = m;
- nod.type = types[TIND];
- nod.xoffset = 0;
- xcom(&nod);
- gopcode(OADDR, d->type, &nod, d);
-}
-
-void
-mulparam(uint32 m, Mparam *mp)
-{
- int c, i, j, n, o, q, s;
- int bc, bi, bn, bo, bq, bs, bt;
- schar *p;
- int32 u;
- uint32 t;
-
- bc = bq = 10;
- bi = bn = bo = bs = bt = 0;
- for(i = 0; i < nelem(malgs); i++) {
- for(p = malgs[i].vals, j = 0; (o = p[j]) < 100; j++)
- for(s = 0; s < 2; s++) {
- c = 10;
- q = 10;
- u = m - o;
- if(u == 0)
- continue;
- if(s) {
- o = -o;
- if(o > 0)
- continue;
- u = -u;
- }
- n = lowbit(u);
- t = (uint32)u >> n;
- switch(i) {
- case 0:
- if(t == 1) {
- c = s + 1;
- q = 0;
- break;
- }
- switch(t) {
- case 3:
- case 5:
- case 9:
- c = s + 1;
- if(n)
- c++;
- q = 0;
- break;
- }
- if(s)
- break;
- switch(t) {
- case 15:
- case 25:
- case 27:
- case 45:
- case 81:
- c = 2;
- if(n)
- c++;
- q = 1;
- break;
- }
- break;
- case 1:
- if(t == 1) {
- c = 3;
- q = 3;
- break;
- }
- switch(t) {
- case 3:
- case 5:
- case 9:
- c = 3;
- q = 2;
- break;
- }
- break;
- case 2:
- if(t == 1) {
- c = 3;
- q = 2;
- break;
- }
- break;
- case 3:
- if(s)
- break;
- if(t == 1) {
- c = 3;
- q = 1;
- break;
- }
- break;
- case 4:
- if(t == 1) {
- c = 3;
- q = 0;
- break;
- }
- break;
- }
- if(c < bc || (c == bc && q > bq)) {
- bc = c;
- bi = i;
- bn = n;
- bo = o;
- bq = q;
- bs = s;
- bt = t;
- }
- }
- }
- mp->value = m;
- if(bc <= 3) {
- mp->alg = bi;
- mp->shift = bn;
- mp->off = bo;
- mp->neg = bs;
- mp->arg = bt;
- }
- else
- mp->alg = -1;
-}
-
-int
-m0(int a)
-{
- switch(a) {
- case -2:
- case 2:
- return 2;
- case -3:
- case 3:
- return 2;
- case -4:
- case 4:
- return 4;
- case -5:
- case 5:
- return 4;
- case 6:
- return 2;
- case -8:
- case 8:
- return 8;
- case -9:
- case 9:
- return 8;
- case 10:
- return 4;
- case 12:
- return 2;
- case 15:
- return 2;
- case 18:
- return 8;
- case 20:
- return 4;
- case 24:
- return 2;
- case 25:
- return 4;
- case 27:
- return 2;
- case 36:
- return 8;
- case 40:
- return 4;
- case 45:
- return 4;
- case 72:
- return 8;
- case 81:
- return 8;
- }
- diag(Z, "bad m0");
- return 0;
-}
-
-int
-m1(int a)
-{
- switch(a) {
- case 15:
- return 4;
- case 25:
- return 4;
- case 27:
- return 8;
- case 45:
- return 8;
- case 81:
- return 8;
- }
- diag(Z, "bad m1");
- return 0;
-}
-
-int
-m2(int a)
-{
- switch(a) {
- case 6:
- return 2;
- case 10:
- return 2;
- case 12:
- return 4;
- case 18:
- return 2;
- case 20:
- return 4;
- case 24:
- return 8;
- case 36:
- return 4;
- case 40:
- return 8;
- case 72:
- return 8;
- }
- diag(Z, "bad m2");
- return 0;
-}
-
-void
-shiftit(Type *t, Node *s, Node *d)
-{
- int32 c;
-
- c = (int32)s->vconst & 31;
- switch(c) {
- case 0:
- break;
- case 1:
- gopcode(OADD, t, d, d);
- break;
- default:
- gopcode(OASHL, t, s, d);
- }
-}
-
-static int
-mulgen1(uint32 v, Node *n)
-{
- int i, o;
- Mparam *p;
- Node nod, nods;
-
- for(i = 0; i < nelem(multab); i++) {
- p = &multab[i];
- if(p->value == v)
- goto found;
- }
-
- p = &multab[mulptr];
- if(++mulptr == nelem(multab))
- mulptr = 0;
-
- mulparam(v, p);
-
-found:
-// print("v=%.x a=%d n=%d s=%d g=%d o=%d \n", p->value, p->alg, p->neg, p->shift, p->arg, p->off);
- if(p->alg < 0)
- return 0;
-
- nods = *nodconst(p->shift);
-
- o = OADD;
- if(p->alg > 0) {
- regalloc(&nod, n, Z);
- if(p->off < 0)
- o = OSUB;
- }
-
- switch(p->alg) {
- case 0:
- switch(p->arg) {
- case 1:
- shiftit(n->type, &nods, n);
- break;
- case 15:
- case 25:
- case 27:
- case 45:
- case 81:
- genmuladd(n, n, m1(p->arg), n);
- /* fall thru */
- case 3:
- case 5:
- case 9:
- genmuladd(n, n, m0(p->arg), n);
- shiftit(n->type, &nods, n);
- break;
- default:
- goto bad;
- }
- if(p->neg == 1)
- gins(ANEGL, Z, n);
- break;
- case 1:
- switch(p->arg) {
- case 1:
- gmove(n, &nod);
- shiftit(n->type, &nods, &nod);
- break;
- case 3:
- case 5:
- case 9:
- genmuladd(&nod, n, m0(p->arg), n);
- shiftit(n->type, &nods, &nod);
- break;
- default:
- goto bad;
- }
- if(p->neg)
- gopcode(o, n->type, &nod, n);
- else {
- gopcode(o, n->type, n, &nod);
- gmove(&nod, n);
- }
- break;
- case 2:
- genmuladd(&nod, n, m0(p->off), n);
- shiftit(n->type, &nods, n);
- goto comop;
- case 3:
- genmuladd(&nod, n, m0(p->off), n);
- shiftit(n->type, &nods, n);
- genmuladd(n, &nod, m2(p->off), n);
- break;
- case 4:
- genmuladd(&nod, n, m0(p->off), nodconst(0));
- shiftit(n->type, &nods, n);
- goto comop;
- default:
- diag(Z, "bad mul alg");
- break;
- comop:
- if(p->neg) {
- gopcode(o, n->type, n, &nod);
- gmove(&nod, n);
- }
- else
- gopcode(o, n->type, &nod, n);
- }
-
- if(p->alg > 0)
- regfree(&nod);
-
- return 1;
-
-bad:
- diag(Z, "mulgen botch");
- return 1;
-}
-
-void
-mulgen(Type *t, Node *r, Node *n)
-{
- if(!mulgen1(r->vconst, n))
- gopcode(OMUL, t, r, n);
-}
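mulparam/mulgen1 above strength-reduce a multiply by a small constant into scaled-add (LEA) and shift sequences, falling back to gopcode(OMUL, ...) when no cheap form exists. An illustrative instance of the kind of identity exploited, not generated code: x*10 as one scaled add plus one shift (mul10 is a hypothetical name):

	#include <stdint.h>

	/* x*10 == (x + 4*x) << 1: one LEA-style scaled add, one shift */
	static uint32_t
	mul10(uint32_t x)
	{
		uint32_t t = x + 4*x;	/* 5*x, e.g. LEA t, [x + 4*x] */
		return t << 1;		/* times 2, e.g. a shift or ADD t,t */
	}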
diff --git a/src/cmd/6c/peep.c b/src/cmd/6c/peep.c
deleted file mode 100644
index a11067c84..000000000
--- a/src/cmd/6c/peep.c
+++ /dev/null
@@ -1,902 +0,0 @@
-// Inferno utils/6c/peep.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-static int
-needc(Prog *p)
-{
- while(p != P) {
- switch(p->as) {
- case AADCL:
- case AADCQ:
- case ASBBL:
- case ASBBQ:
- case ARCRL:
- case ARCRQ:
- return 1;
- case AADDL:
- case AADDQ:
- case ASUBL:
- case ASUBQ:
- case AJMP:
- case ARET:
- case ACALL:
- return 0;
- default:
- if(p->to.type == D_BRANCH)
- return 0;
- }
- p = p->link;
- }
- return 0;
-}
-
-static Reg*
-rnops(Reg *r)
-{
- Prog *p;
- Reg *r1;
-
- if(r != R)
- for(;;){
- p = r->prog;
- if(p->as != ANOP || p->from.type != D_NONE || p->to.type != D_NONE)
- break;
- r1 = uniqs(r);
- if(r1 == R)
- break;
- r = r1;
- }
- return r;
-}
-
-void
-peep(void)
-{
- Reg *r, *r1, *r2;
- Prog *p, *p1;
- int t;
-
- /*
- * complete R structure
- */
- t = 0;
- for(r=firstr; r!=R; r=r1) {
- r1 = r->link;
- if(r1 == R)
- break;
- p = r->prog->link;
- while(p != r1->prog)
- switch(p->as) {
- default:
- r2 = rega();
- r->link = r2;
- r2->link = r1;
-
- r2->prog = p;
- r2->p1 = r;
- r->s1 = r2;
- r2->s1 = r1;
- r1->p1 = r2;
-
- r = r2;
- t++;
-
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- p = p->link;
- }
- }
-
- pc = 0; /* speculating it won't kill */
-
-loop1:
-
- t = 0;
- for(r=firstr; r!=R; r=r->link) {
- p = r->prog;
- switch(p->as) {
- case AMOVL:
- case AMOVQ:
- case AMOVSS:
- case AMOVSD:
- if(regtyp(&p->to))
- if(regtyp(&p->from)) {
- if(copyprop(r)) {
- excise(r);
- t++;
- } else
- if(subprop(r) && copyprop(r)) {
- excise(r);
- t++;
- }
- }
- break;
-
- case AMOVBLZX:
- case AMOVWLZX:
- case AMOVBLSX:
- case AMOVWLSX:
- if(regtyp(&p->to)) {
- r1 = rnops(uniqs(r));
- if(r1 != R) {
- p1 = r1->prog;
- if(p->as == p1->as && p->to.type == p1->from.type){
- p1->as = AMOVL;
- t++;
- }
- }
- }
- break;
-
- case AMOVBQSX:
- case AMOVBQZX:
- case AMOVWQSX:
- case AMOVWQZX:
- case AMOVLQSX:
- case AMOVLQZX:
- if(regtyp(&p->to)) {
- r1 = rnops(uniqs(r));
- if(r1 != R) {
- p1 = r1->prog;
- if(p->as == p1->as && p->to.type == p1->from.type){
- p1->as = AMOVQ;
- t++;
- }
- }
- }
- break;
-
- case AADDL:
- case AADDQ:
- case AADDW:
- if(p->from.type != D_CONST || needc(p->link))
- break;
- if(p->from.offset == -1){
- if(p->as == AADDQ)
- p->as = ADECQ;
- else if(p->as == AADDL)
- p->as = ADECL;
- else
- p->as = ADECW;
- p->from = zprog.from;
- }
- else if(p->from.offset == 1){
- if(p->as == AADDQ)
- p->as = AINCQ;
- else if(p->as == AADDL)
- p->as = AINCL;
- else
- p->as = AINCW;
- p->from = zprog.from;
- }
- break;
-
- case ASUBL:
- case ASUBQ:
- case ASUBW:
- if(p->from.type != D_CONST || needc(p->link))
- break;
- if(p->from.offset == -1) {
- if(p->as == ASUBQ)
- p->as = AINCQ;
- else if(p->as == ASUBL)
- p->as = AINCL;
- else
- p->as = AINCW;
- p->from = zprog.from;
- }
- else if(p->from.offset == 1){
- if(p->as == ASUBQ)
- p->as = ADECQ;
- else if(p->as == ASUBL)
- p->as = ADECL;
- else
- p->as = ADECW;
- p->from = zprog.from;
- }
- break;
- }
- }
- if(t)
- goto loop1;
-}
-
-void
-excise(Reg *r)
-{
- Prog *p;
-
- p = r->prog;
- p->as = ANOP;
- p->from = zprog.from;
- p->to = zprog.to;
-}
-
-Reg*
-uniqp(Reg *r)
-{
- Reg *r1;
-
- r1 = r->p1;
- if(r1 == R) {
- r1 = r->p2;
- if(r1 == R || r1->p2link != R)
- return R;
- } else
- if(r->p2 != R)
- return R;
- return r1;
-}
-
-Reg*
-uniqs(Reg *r)
-{
- Reg *r1;
-
- r1 = r->s1;
- if(r1 == R) {
- r1 = r->s2;
- if(r1 == R)
- return R;
- } else
- if(r->s2 != R)
- return R;
- return r1;
-}
-
-int
-regtyp(Addr *a)
-{
- int t;
-
- t = a->type;
- if(t >= D_AX && t <= D_R15)
- return 1;
- if(t >= D_X0 && t <= D_X0+15)
- return 1;
- return 0;
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- * MOV a, R0
- * ADD b, R0 / no use of R1
- * MOV R0, R1
- * would be converted to
- * MOV a, R1
- * ADD b, R1
- * MOV R1, R0
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- */
-int
-subprop(Reg *r0)
-{
- Prog *p;
- Addr *v1, *v2;
- Reg *r;
- int t;
-
- p = r0->prog;
- v1 = &p->from;
- if(!regtyp(v1))
- return 0;
- v2 = &p->to;
- if(!regtyp(v2))
- return 0;
- for(r=uniqp(r0); r!=R; r=uniqp(r)) {
- if(uniqs(r) == R)
- break;
- p = r->prog;
- switch(p->as) {
- case ACALL:
- return 0;
-
- case AIMULL:
- case AIMULQ:
- case AIMULW:
- if(p->to.type != D_NONE)
- break;
- goto giveup;
-
- case AROLB:
- case AROLL:
- case AROLQ:
- case AROLW:
- case ARORB:
- case ARORL:
- case ARORQ:
- case ARORW:
- case ASALB:
- case ASALL:
- case ASALQ:
- case ASALW:
- case ASARB:
- case ASARL:
- case ASARQ:
- case ASARW:
- case ASHLB:
- case ASHLL:
- case ASHLQ:
- case ASHLW:
- case ASHRB:
- case ASHRL:
- case ASHRQ:
- case ASHRW:
- if(p->from.type == D_CONST)
- break;
- goto giveup;
-
- case ADIVB:
- case ADIVL:
- case ADIVQ:
- case ADIVW:
- case AIDIVB:
- case AIDIVL:
- case AIDIVQ:
- case AIDIVW:
- case AIMULB:
- case AMULB:
- case AMULL:
- case AMULQ:
- case AMULW:
-
- case AREP:
- case AREPN:
-
- case ACWD:
- case ACDQ:
- case ACQO:
-
- case ASTOSB:
- case ASTOSL:
- case ASTOSQ:
- case AMOVSB:
- case AMOVSL:
- case AMOVSQ:
- case AMOVQL:
- giveup:
- return 0;
-
- case AMOVL:
- case AMOVQ:
- if(p->to.type == v1->type)
- goto gotit;
- break;
- }
- if(copyau(&p->from, v2) ||
- copyau(&p->to, v2))
- break;
- if(copysub(&p->from, v1, v2, 0) ||
- copysub(&p->to, v1, v2, 0))
- break;
- }
- return 0;
-
-gotit:
- copysub(&p->to, v1, v2, 1);
- if(debug['P']) {
- print("gotit: %D->%D\n%P", v1, v2, r->prog);
- if(p->from.type == v2->type)
- print(" excise");
- print("\n");
- }
- for(r=uniqs(r); r!=r0; r=uniqs(r)) {
- p = r->prog;
- copysub(&p->from, v1, v2, 1);
- copysub(&p->to, v1, v2, 1);
- if(debug['P'])
- print("%P\n", r->prog);
- }
- t = v1->type;
- v1->type = v2->type;
- v2->type = t;
- if(debug['P'])
- print("%P last\n", r->prog);
- return 1;
-}
-
-/*
- * The idea is to remove redundant copies.
- * v1->v2 F=0
- * (use v2 s/v2/v1/)*
- * set v1 F=1
- * use v2 return fail
- * -----------------
- * v1->v2 F=0
- * (use v2 s/v2/v1/)*
- * set v1 F=1
- * set v2 return success
- */
-int
-copyprop(Reg *r0)
-{
- Prog *p;
- Addr *v1, *v2;
- Reg *r;
-
- p = r0->prog;
- v1 = &p->from;
- v2 = &p->to;
- if(copyas(v1, v2))
- return 1;
- for(r=firstr; r!=R; r=r->link)
- r->active = 0;
- return copy1(v1, v2, r0->s1, 0);
-}
-
-int
-copy1(Addr *v1, Addr *v2, Reg *r, int f)
-{
- int t;
- Prog *p;
-
- if(r->active) {
- if(debug['P'])
- print("act set; return 1\n");
- return 1;
- }
- r->active = 1;
- if(debug['P'])
- print("copy %D->%D f=%d\n", v1, v2, f);
- for(; r != R; r = r->s1) {
- p = r->prog;
- if(debug['P'])
- print("%P", p);
- if(!f && uniqp(r) == R) {
- f = 1;
- if(debug['P'])
- print("; merge; f=%d", f);
- }
- t = copyu(p, v2, A);
- switch(t) {
- case 2: /* rar, can't split */
- if(debug['P'])
- print("; %D rar; return 0\n", v2);
- return 0;
-
- case 3: /* set */
- if(debug['P'])
- print("; %D set; return 1\n", v2);
- return 1;
-
- case 1: /* used, substitute */
- case 4: /* use and set */
- if(f) {
- if(!debug['P'])
- return 0;
- if(t == 4)
- print("; %D used+set and f=%d; return 0\n", v2, f);
- else
- print("; %D used and f=%d; return 0\n", v2, f);
- return 0;
- }
- if(copyu(p, v2, v1)) {
- if(debug['P'])
- print("; sub fail; return 0\n");
- return 0;
- }
- if(debug['P'])
- print("; sub %D/%D", v2, v1);
- if(t == 4) {
- if(debug['P'])
- print("; %D used+set; return 1\n", v2);
- return 1;
- }
- break;
- }
- if(!f) {
- t = copyu(p, v1, A);
- if(!f && (t == 2 || t == 3 || t == 4)) {
- f = 1;
- if(debug['P'])
- print("; %D set and !f; f=%d", v1, f);
- }
- }
- if(debug['P'])
- print("\n");
- if(r->s2)
- if(!copy1(v1, v2, r->s2, f))
- return 0;
- }
- return 1;
-}
-
-/*
- * return
- * 1 if v only used (and substitute),
- * 2 if read-alter-rewrite
- * 3 if set
- * 4 if set and used
- * 0 otherwise (not touched)
- */
-int
-copyu(Prog *p, Addr *v, Addr *s)
-{
-
- switch(p->as) {
-
- default:
- if(debug['P'])
- print("unknown op %A\n", p->as);
- /* SBBL; ADCL; FLD1; SAHF */
- return 2;
-
-
- case ANEGB:
- case ANEGW:
- case ANEGL:
- case ANEGQ:
- case ANOTB:
- case ANOTW:
- case ANOTL:
- case ANOTQ:
- if(copyas(&p->to, v))
- return 2;
- break;
-
- case ALEAL: /* lhs addr, rhs store */
- case ALEAQ:
- if(copyas(&p->from, v))
- return 2;
-
-
- case ANOP: /* rhs store */
- case AMOVL:
- case AMOVQ:
- case AMOVBLSX:
- case AMOVBLZX:
- case AMOVBQSX:
- case AMOVBQZX:
- case AMOVLQSX:
- case AMOVLQZX:
- case AMOVWLSX:
- case AMOVWLZX:
- case AMOVWQSX:
- case AMOVWQZX:
- case AMOVQL:
-
- case AMOVSS:
- case AMOVSD:
- case ACVTSD2SL:
- case ACVTSD2SQ:
- case ACVTSD2SS:
- case ACVTSL2SD:
- case ACVTSL2SS:
- case ACVTSQ2SD:
- case ACVTSQ2SS:
- case ACVTSS2SD:
- case ACVTSS2SL:
- case ACVTSS2SQ:
- case ACVTTSD2SL:
- case ACVTTSD2SQ:
- case ACVTTSS2SL:
- case ACVTTSS2SQ:
- if(copyas(&p->to, v)) {
- if(s != A)
- return copysub(&p->from, v, s, 1);
- if(copyau(&p->from, v))
- return 4;
- return 3;
- }
- goto caseread;
-
- case AROLB:
- case AROLL:
- case AROLQ:
- case AROLW:
- case ARORB:
- case ARORL:
- case ARORQ:
- case ARORW:
- case ASALB:
- case ASALL:
- case ASALQ:
- case ASALW:
- case ASARB:
- case ASARL:
- case ASARQ:
- case ASARW:
- case ASHLB:
- case ASHLL:
- case ASHLQ:
- case ASHLW:
- case ASHRB:
- case ASHRL:
- case ASHRQ:
- case ASHRW:
- if(copyas(&p->to, v))
- return 2;
- if(copyas(&p->from, v))
- if(p->from.type == D_CX)
- return 2;
- goto caseread;
-
- case AADDB: /* rhs rar */
- case AADDL:
- case AADDQ:
- case AADDW:
- case AANDB:
- case AANDL:
- case AANDQ:
- case AANDW:
- case ADECL:
- case ADECQ:
- case ADECW:
- case AINCL:
- case AINCQ:
- case AINCW:
- case ASUBB:
- case ASUBL:
- case ASUBQ:
- case ASUBW:
- case AORB:
- case AORL:
- case AORQ:
- case AORW:
- case AXORB:
- case AXORL:
- case AXORQ:
- case AXORW:
- case AMOVB:
- case AMOVW:
-
- case AADDSD:
- case AADDSS:
- case ACMPSD:
- case ACMPSS:
- case ADIVSD:
- case ADIVSS:
- case AMAXSD:
- case AMAXSS:
- case AMINSD:
- case AMINSS:
- case AMULSD:
- case AMULSS:
- case ARCPSS:
- case ARSQRTSS:
- case ASQRTSD:
- case ASQRTSS:
- case ASUBSD:
- case ASUBSS:
- case AXORPD:
- if(copyas(&p->to, v))
- return 2;
- goto caseread;
-
- case ACMPL: /* read only */
- case ACMPW:
- case ACMPB:
- case ACMPQ:
-
- case APREFETCHT0:
- case APREFETCHT1:
- case APREFETCHT2:
- case APREFETCHNTA:
-
- case ACOMISD:
- case ACOMISS:
- case AUCOMISD:
- case AUCOMISS:
- caseread:
- if(s != A) {
- if(copysub(&p->from, v, s, 1))
- return 1;
- return copysub(&p->to, v, s, 1);
- }
- if(copyau(&p->from, v))
- return 1;
- if(copyau(&p->to, v))
- return 1;
- break;
-
- case AJGE: /* no reference */
- case AJNE:
- case AJLE:
- case AJEQ:
- case AJHI:
- case AJLS:
- case AJMI:
- case AJPL:
- case AJGT:
- case AJLT:
- case AJCC:
- case AJCS:
-
- case AADJSP:
- case AWAIT:
- case ACLD:
- break;
-
- case AIMULL:
- case AIMULQ:
- case AIMULW:
- if(p->to.type != D_NONE) {
- if(copyas(&p->to, v))
- return 2;
- goto caseread;
- }
-
- case ADIVB:
- case ADIVL:
- case ADIVQ:
- case ADIVW:
- case AIDIVB:
- case AIDIVL:
- case AIDIVQ:
- case AIDIVW:
- case AIMULB:
- case AMULB:
- case AMULL:
- case AMULQ:
- case AMULW:
-
- case ACWD:
- case ACDQ:
- case ACQO:
- if(v->type == D_AX || v->type == D_DX)
- return 2;
- goto caseread;
-
- case AREP:
- case AREPN:
- if(v->type == D_CX)
- return 2;
- goto caseread;
-
- case AMOVSB:
- case AMOVSL:
- case AMOVSQ:
- if(v->type == D_DI || v->type == D_SI)
- return 2;
- goto caseread;
-
- case ASTOSB:
- case ASTOSL:
- case ASTOSQ:
- if(v->type == D_AX || v->type == D_DI)
- return 2;
- goto caseread;
-
- case AJMP: /* funny */
- if(s != A) {
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v))
- return 1;
- return 0;
-
- case ARET: /* funny */
- if(v->type == REGRET || v->type == FREGRET)
- return 2;
- if(s != A)
- return 1;
- return 3;
-
- case ACALL: /* funny */
- if(REGARG >= 0 && v->type == (uchar)REGARG)
- return 2;
-
- if(s != A) {
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v))
- return 4;
- return 3;
-
- case ATEXT: /* funny */
- if(REGARG >= 0 && v->type == (uchar)REGARG)
- return 3;
- return 0;
- }
- return 0;
-}
-
-/*
- * direct reference,
- * could be set/use depending on
- * semantics
- */
-int
-copyas(Addr *a, Addr *v)
-{
- if(a->type != v->type)
- return 0;
- if(regtyp(v))
- return 1;
- if(v->type == D_AUTO || v->type == D_PARAM)
- if(v->offset == a->offset)
- return 1;
- return 0;
-}
-
-/*
- * either direct or indirect
- */
-int
-copyau(Addr *a, Addr *v)
-{
-
- if(copyas(a, v))
- return 1;
- if(regtyp(v)) {
- if(a->type-D_INDIR == v->type)
- return 1;
- if(a->index == v->type)
- return 1;
- }
- return 0;
-}
-
-/*
- * substitute s for v in a
- * return failure to substitute
- */
-int
-copysub(Addr *a, Addr *v, Addr *s, int f)
-{
- int t;
-
- if(copyas(a, v)) {
- t = s->type;
- if(t >= D_AX && t <= D_R15 || t >= D_X0 && t <= D_X0+15) {
- if(f)
- a->type = t;
- }
- return 0;
- }
- if(regtyp(v)) {
- t = v->type;
- if(a->type == t+D_INDIR) {
- if((s->type == D_BP || s->type == D_R13) && a->index != D_NONE)
- return 1; /* can't use BP-base with index */
- if(f)
- a->type = s->type+D_INDIR;
-// return 0;
- }
- if(a->index == t) {
- if(f)
- a->index = s->type;
- return 0;
- }
- return 0;
- }
- return 0;
-}
diff --git a/src/cmd/6c/reg.c b/src/cmd/6c/reg.c
deleted file mode 100644
index 6f8d3ce14..000000000
--- a/src/cmd/6c/reg.c
+++ /dev/null
@@ -1,1523 +0,0 @@
-// Inferno utils/6c/reg.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-static void fixjmp(Reg*);
-
-Reg*
-rega(void)
-{
- Reg *r;
-
- r = freer;
- if(r == R) {
- r = alloc(sizeof(*r));
- } else
- freer = r->link;
-
- *r = zreg;
- return r;
-}
-
-int
-rcmp(const void *a1, const void *a2)
-{
- Rgn *p1, *p2;
- int c1, c2;
-
- p1 = (Rgn*)a1;
- p2 = (Rgn*)a2;
- c1 = p2->cost;
- c2 = p1->cost;
- if(c1 -= c2)
- return c1;
- return p2->varno - p1->varno;
-}
-
-void
-regopt(Prog *p)
-{
- Reg *r, *r1, *r2;
- Prog *p1;
- int i, z;
- int32 initpc, val, npc;
- uint32 vreg;
- Bits bit;
- struct
- {
- int32 m;
- int32 c;
- Reg* p;
- } log5[6], *lp;
-
- firstr = R;
- lastr = R;
- nvar = 0;
- regbits = RtoB(D_SP) | RtoB(D_AX) | RtoB(D_X0);
- for(z=0; z<BITS; z++) {
- externs.b[z] = 0;
- params.b[z] = 0;
- consts.b[z] = 0;
- addrs.b[z] = 0;
- }
-
- /*
- * pass 1
- * build aux data structure
- * allocate pcs
- * find use and set of variables
- */
- val = 5L * 5L * 5L * 5L * 5L;
- lp = log5;
- for(i=0; i<5; i++) {
- lp->m = val;
- lp->c = 0;
- lp->p = R;
- val /= 5L;
- lp++;
- }
- val = 0;
- for(; p != P; p = p->link) {
- switch(p->as) {
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- case AFUNCDATA:
- continue;
- }
- r = rega();
- if(firstr == R) {
- firstr = r;
- lastr = r;
- } else {
- lastr->link = r;
- r->p1 = lastr;
- lastr->s1 = r;
- lastr = r;
- }
- r->prog = p;
- r->pc = val;
- val++;
-
- lp = log5;
- for(i=0; i<5; i++) {
- lp->c--;
- if(lp->c <= 0) {
- lp->c = lp->m;
- if(lp->p != R)
- lp->p->log5 = r;
- lp->p = r;
- (lp+1)->c = 0;
- break;
- }
- lp++;
- }
-
- r1 = r->p1;
- if(r1 != R)
- switch(r1->prog->as) {
- case ARET:
- case AJMP:
- case AIRETL:
- case AIRETQ:
- r->p1 = R;
- r1->s1 = R;
- }
-
- bit = mkvar(r, &p->from);
- if(bany(&bit))
- switch(p->as) {
- /*
- * funny
- */
- case ALEAL:
- case ALEAQ:
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- break;
-
- /*
- * left side read
- */
- default:
- for(z=0; z<BITS; z++)
- r->use1.b[z] |= bit.b[z];
- break;
- }
-
- bit = mkvar(r, &p->to);
- if(bany(&bit))
- switch(p->as) {
- default:
- diag(Z, "reg: unknown op: %A", p->as);
- break;
-
- /*
- * right side read
- */
- case ACMPB:
- case ACMPL:
- case ACMPQ:
- case ACMPW:
- case APREFETCHT0:
- case APREFETCHT1:
- case APREFETCHT2:
- case APREFETCHNTA:
- case ACOMISS:
- case ACOMISD:
- case AUCOMISS:
- case AUCOMISD:
- for(z=0; z<BITS; z++)
- r->use2.b[z] |= bit.b[z];
- break;
-
- /*
- * right side write
- */
- case ANOP:
- case AMOVL:
- case AMOVQ:
- case AMOVB:
- case AMOVW:
- case AMOVBLSX:
- case AMOVBLZX:
- case AMOVBQSX:
- case AMOVBQZX:
- case AMOVLQSX:
- case AMOVLQZX:
- case AMOVWLSX:
- case AMOVWLZX:
- case AMOVWQSX:
- case AMOVWQZX:
- case AMOVQL:
-
- case AMOVSS:
- case AMOVSD:
- case ACVTSD2SL:
- case ACVTSD2SQ:
- case ACVTSD2SS:
- case ACVTSL2SD:
- case ACVTSL2SS:
- case ACVTSQ2SD:
- case ACVTSQ2SS:
- case ACVTSS2SD:
- case ACVTSS2SL:
- case ACVTSS2SQ:
- case ACVTTSD2SL:
- case ACVTTSD2SQ:
- case ACVTTSS2SL:
- case ACVTTSS2SQ:
- for(z=0; z<BITS; z++)
- r->set.b[z] |= bit.b[z];
- break;
-
- /*
- * right side read+write
- */
- case AADDB:
- case AADDL:
- case AADDQ:
- case AADDW:
- case AANDB:
- case AANDL:
- case AANDQ:
- case AANDW:
- case ASUBB:
- case ASUBL:
- case ASUBQ:
- case ASUBW:
- case AORB:
- case AORL:
- case AORQ:
- case AORW:
- case AXORB:
- case AXORL:
- case AXORQ:
- case AXORW:
- case ASALB:
- case ASALL:
- case ASALQ:
- case ASALW:
- case ASARB:
- case ASARL:
- case ASARQ:
- case ASARW:
- case AROLB:
- case AROLL:
- case AROLQ:
- case AROLW:
- case ARORB:
- case ARORL:
- case ARORQ:
- case ARORW:
- case ASHLB:
- case ASHLL:
- case ASHLQ:
- case ASHLW:
- case ASHRB:
- case ASHRL:
- case ASHRQ:
- case ASHRW:
- case AIMULL:
- case AIMULQ:
- case AIMULW:
- case ANEGL:
- case ANEGQ:
- case ANOTL:
- case ANOTQ:
- case AADCL:
- case AADCQ:
- case ASBBL:
- case ASBBQ:
-
- case AADDSD:
- case AADDSS:
- case ACMPSD:
- case ACMPSS:
- case ADIVSD:
- case ADIVSS:
- case AMAXSD:
- case AMAXSS:
- case AMINSD:
- case AMINSS:
- case AMULSD:
- case AMULSS:
- case ARCPSS:
- case ARSQRTSS:
- case ASQRTSD:
- case ASQRTSS:
- case ASUBSD:
- case ASUBSS:
- case AXORPD:
- for(z=0; z<BITS; z++) {
- r->set.b[z] |= bit.b[z];
- r->use2.b[z] |= bit.b[z];
- }
- break;
-
- /*
- * funny
- */
- case ACALL:
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- break;
- }
-
- switch(p->as) {
- case AIMULL:
- case AIMULQ:
- case AIMULW:
- if(p->to.type != D_NONE)
- break;
-
- case AIDIVB:
- case AIDIVL:
- case AIDIVQ:
- case AIDIVW:
- case AIMULB:
- case ADIVB:
- case ADIVL:
- case ADIVQ:
- case ADIVW:
- case AMULB:
- case AMULL:
- case AMULQ:
- case AMULW:
-
- case ACWD:
- case ACDQ:
- case ACQO:
- r->regu |= RtoB(D_AX) | RtoB(D_DX);
- break;
-
- case AREP:
- case AREPN:
- case ALOOP:
- case ALOOPEQ:
- case ALOOPNE:
- r->regu |= RtoB(D_CX);
- break;
-
- case AMOVSB:
- case AMOVSL:
- case AMOVSQ:
- case AMOVSW:
- case ACMPSB:
- case ACMPSL:
- case ACMPSQ:
- case ACMPSW:
- r->regu |= RtoB(D_SI) | RtoB(D_DI);
- break;
-
- case ASTOSB:
- case ASTOSL:
- case ASTOSQ:
- case ASTOSW:
- case ASCASB:
- case ASCASL:
- case ASCASQ:
- case ASCASW:
- r->regu |= RtoB(D_AX) | RtoB(D_DI);
- break;
-
- case AINSB:
- case AINSL:
- case AINSW:
- case AOUTSB:
- case AOUTSL:
- case AOUTSW:
- r->regu |= RtoB(D_DI) | RtoB(D_DX);
- break;
- }
- }
- if(firstr == R)
- return;
- initpc = pc - val;
- npc = val;
-
- /*
- * pass 2
- * turn branch references to pointers
- * build back pointers
- */
- for(r = firstr; r != R; r = r->link) {
- p = r->prog;
- if(p->to.type == D_BRANCH) {
- val = p->to.offset - initpc;
- r1 = firstr;
- while(r1 != R) {
- r2 = r1->log5;
- if(r2 != R && val >= r2->pc) {
- r1 = r2;
- continue;
- }
- if(r1->pc == val)
- break;
- r1 = r1->link;
- }
- if(r1 == R) {
- nearln = p->lineno;
- diag(Z, "ref not found\n%P", p);
- continue;
- }
- if(r1 == r) {
- nearln = p->lineno;
- diag(Z, "ref to self\n%P", p);
- continue;
- }
- r->s2 = r1;
- r->p2link = r1->p2;
- r1->p2 = r;
- }
- }
- if(debug['R']) {
- p = firstr->prog;
- print("\n%L %D\n", p->lineno, &p->from);
- }
-
- /*
- * pass 2.1
- * fix jumps
- */
- fixjmp(firstr);
-
- /*
- * pass 2.5
- * find looping structure
- */
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- change = 0;
- loopit(firstr, npc);
- if(debug['R'] && debug['v']) {
- print("\nlooping structure:\n");
- for(r = firstr; r != R; r = r->link) {
- print("%d:%P", r->loop, r->prog);
- for(z=0; z<BITS; z++)
- bit.b[z] = r->use1.b[z] |
- r->use2.b[z] |
- r->set.b[z];
- if(bany(&bit)) {
- print("\t");
- if(bany(&r->use1))
- print(" u1=%B", r->use1);
- if(bany(&r->use2))
- print(" u2=%B", r->use2);
- if(bany(&r->set))
- print(" st=%B", r->set);
- }
- print("\n");
- }
- }
-
- /*
- * pass 3
- * iterate propagating usage
- * back until flow graph is complete
- */
-loop1:
- change = 0;
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- for(r = firstr; r != R; r = r->link)
- if(r->prog->as == ARET)
- prop(r, zbits, zbits);
-loop11:
- /* pick up unreachable code */
- i = 0;
- for(r = firstr; r != R; r = r1) {
- r1 = r->link;
- if(r1 && r1->active && !r->active) {
- prop(r, zbits, zbits);
- i = 1;
- }
- }
- if(i)
- goto loop11;
- if(change)
- goto loop1;
-
-
- /*
- * pass 4
- * iterate propagating register/variable synchrony
- * forward until graph is complete
- */
-loop2:
- change = 0;
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- synch(firstr, zbits);
- if(change)
- goto loop2;
-
-
- /*
- * pass 5
- * isolate regions
- * calculate costs (paint1)
- */
- r = firstr;
- if(r) {
- for(z=0; z<BITS; z++)
- bit.b[z] = (r->refahead.b[z] | r->calahead.b[z]) &
- ~(externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z]);
- if(bany(&bit)) {
- nearln = r->prog->lineno;
- warn(Z, "used and not set: %B", bit);
- if(debug['R'] && !debug['w'])
- print("used and not set: %B\n", bit);
- }
- }
- if(debug['R'] && debug['v'])
- print("\nprop structure:\n");
- for(r = firstr; r != R; r = r->link)
- r->act = zbits;
- rgp = region;
- nregion = 0;
- for(r = firstr; r != R; r = r->link) {
- if(debug['R'] && debug['v']) {
- print("%P\t", r->prog);
- if(bany(&r->set))
- print("s:%B ", r->set);
- if(bany(&r->refahead))
- print("ra:%B ", r->refahead);
- if(bany(&r->calahead))
- print("ca:%B ", r->calahead);
- print("\n");
- }
- for(z=0; z<BITS; z++)
- bit.b[z] = r->set.b[z] &
- ~(r->refahead.b[z] | r->calahead.b[z] | addrs.b[z]);
- if(bany(&bit)) {
- nearln = r->prog->lineno;
- warn(Z, "set and not used: %B", bit);
- if(debug['R'])
- print("set and not used: %B\n", bit);
- excise(r);
- }
- for(z=0; z<BITS; z++)
- bit.b[z] = LOAD(r) & ~(r->act.b[z] | addrs.b[z]);
- while(bany(&bit)) {
- i = bnum(bit);
- rgp->enter = r;
- rgp->varno = i;
- change = 0;
- if(debug['R'] && debug['v'])
- print("\n");
- paint1(r, i);
- bit.b[i/32] &= ~(1L<<(i%32));
- if(change <= 0) {
- if(debug['R'])
- print("%L$%d: %B\n",
- r->prog->lineno, change, blsh(i));
- continue;
- }
- rgp->cost = change;
- nregion++;
- if(nregion >= NRGN)
- fatal(Z, "too many regions");
- rgp++;
- }
- }
- qsort(region, nregion, sizeof(region[0]), rcmp);
-
- /*
- * pass 6
- * determine used registers (paint2)
- * replace code (paint3)
- */
- rgp = region;
- for(i=0; i<nregion; i++) {
- bit = blsh(rgp->varno);
- vreg = paint2(rgp->enter, rgp->varno);
- vreg = allreg(vreg, rgp);
- if(debug['R']) {
- print("%L$%d %R: %B\n",
- rgp->enter->prog->lineno,
- rgp->cost,
- rgp->regno,
- bit);
- }
- if(rgp->regno != 0)
- paint3(rgp->enter, rgp->varno, vreg, rgp->regno);
- rgp++;
- }
- /*
- * pass 7
- * peep-hole on basic block
- */
- if(!debug['R'] || debug['P'])
- peep();
-
- /*
- * pass 8
- * recalculate pc
- */
- val = initpc;
- for(r = firstr; r != R; r = r1) {
- r->pc = val;
- p = r->prog;
- p1 = P;
- r1 = r->link;
- if(r1 != R)
- p1 = r1->prog;
- for(; p != p1; p = p->link) {
- switch(p->as) {
- default:
- val++;
- break;
-
- case ANOP:
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- case AFUNCDATA:
- break;
- }
- }
- }
- pc = val;
-
- /*
- * fix up branches
- */
- if(debug['R'])
- if(bany(&addrs))
- print("addrs: %B\n", addrs);
-
- r1 = 0; /* set */
- for(r = firstr; r != R; r = r->link) {
- p = r->prog;
- if(p->to.type == D_BRANCH) {
- p->to.offset = r->s2->pc;
- p->to.u.branch = r->s2->prog;
- }
- r1 = r;
- }
-
- /*
- * last pass
- * eliminate nops
- * free aux structures
- */
- for(p = firstr->prog; p != P; p = p->link){
- while(p->link && p->link->as == ANOP)
- p->link = p->link->link;
- }
- if(r1 != R) {
- r1->link = freer;
- freer = firstr;
- }
-}
-
-/*
- * add mov b,rn
- * just after r
- */
-void
-addmove(Reg *r, int bn, int rn, int f)
-{
- Prog *p, *p1;
- Addr *a;
- Var *v;
-
- p1 = alloc(sizeof(*p1));
- *p1 = zprog;
- p = r->prog;
-
- p1->link = p->link;
- p->link = p1;
- p1->lineno = p->lineno;
-
- v = var + bn;
-
- a = &p1->to;
- a->sym = v->sym;
- a->offset = v->offset;
- a->etype = v->etype;
- a->type = v->name;
-
- p1->as = AMOVL;
- if(v->etype == TCHAR || v->etype == TUCHAR)
- p1->as = AMOVB;
- if(v->etype == TSHORT || v->etype == TUSHORT)
- p1->as = AMOVW;
- if(v->etype == TVLONG || v->etype == TUVLONG || (v->etype == TIND && ewidth[TIND] == 8))
- p1->as = AMOVQ;
- if(v->etype == TFLOAT)
- p1->as = AMOVSS;
- if(v->etype == TDOUBLE)
- p1->as = AMOVSD;
-
- p1->from.type = rn;
- if(!f) {
- p1->from = *a;
- *a = zprog.from;
- a->type = rn;
- if(v->etype == TUCHAR)
- p1->as = AMOVB;
- if(v->etype == TUSHORT)
- p1->as = AMOVW;
- }
- if(debug['R'])
- print("%P\t.a%P\n", p, p1);
-}
-
-uint32
-doregbits(int r)
-{
- uint32 b;
-
- b = 0;
- if(r >= D_INDIR)
- r -= D_INDIR;
- if(r >= D_AX && r <= D_R15)
- b |= RtoB(r);
- else
- if(r >= D_AL && r <= D_R15B)
- b |= RtoB(r-D_AL+D_AX);
- else
- if(r >= D_AH && r <= D_BH)
- b |= RtoB(r-D_AH+D_AX);
- else
- if(r >= D_X0 && r <= D_X0+15)
- b |= FtoB(r);
- return b;
-}
-
-Bits
-mkvar(Reg *r, Addr *a)
-{
- Var *v;
- int i, t, n, et, z;
- int32 o;
- Bits bit;
- LSym *s;
-
- /*
- * mark registers used
- */
- t = a->type;
- r->regu |= doregbits(t);
- r->regu |= doregbits(a->index);
-
- switch(t) {
- default:
- goto none;
- case D_ADDR:
- a->type = a->index;
- bit = mkvar(r, a);
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- a->type = t;
- goto none;
- case D_EXTERN:
- case D_STATIC:
- case D_PARAM:
- case D_AUTO:
- n = t;
- break;
- }
- s = a->sym;
- if(s == nil)
- goto none;
- if(s->name[0] == '.')
- goto none;
- et = a->etype;
- o = a->offset;
- v = var;
- for(i=0; i<nvar; i++) {
- if(s == v->sym)
- if(n == v->name)
- if(o == v->offset)
- goto out;
- v++;
- }
- if(nvar >= NVAR)
- fatal(Z, "variable not optimized: %s", s->name);
- i = nvar;
- nvar++;
- v = &var[i];
- v->sym = s;
- v->offset = o;
- v->name = n;
- v->etype = et;
- if(debug['R'])
- print("bit=%2d et=%2d %D\n", i, et, a);
-
-out:
- bit = blsh(i);
- if(n == D_EXTERN || n == D_STATIC)
- for(z=0; z<BITS; z++)
- externs.b[z] |= bit.b[z];
- if(n == D_PARAM)
- for(z=0; z<BITS; z++)
- params.b[z] |= bit.b[z];
- if(v->etype != et || !(typechlpfd[et] || typev[et])) /* funny punning */
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- return bit;
-
-none:
- return zbits;
-}
-
-void
-prop(Reg *r, Bits ref, Bits cal)
-{
- Reg *r1, *r2;
- int z;
-
- for(r1 = r; r1 != R; r1 = r1->p1) {
- for(z=0; z<BITS; z++) {
- ref.b[z] |= r1->refahead.b[z];
- if(ref.b[z] != r1->refahead.b[z]) {
- r1->refahead.b[z] = ref.b[z];
- change++;
- }
- cal.b[z] |= r1->calahead.b[z];
- if(cal.b[z] != r1->calahead.b[z]) {
- r1->calahead.b[z] = cal.b[z];
- change++;
- }
- }
- switch(r1->prog->as) {
- case ACALL:
- for(z=0; z<BITS; z++) {
- cal.b[z] |= ref.b[z] | externs.b[z];
- ref.b[z] = 0;
- }
- break;
-
- case ATEXT:
- for(z=0; z<BITS; z++) {
- cal.b[z] = 0;
- ref.b[z] = 0;
- }
- break;
-
- case ARET:
- for(z=0; z<BITS; z++) {
- cal.b[z] = externs.b[z];
- ref.b[z] = 0;
- }
- }
- for(z=0; z<BITS; z++) {
- ref.b[z] = (ref.b[z] & ~r1->set.b[z]) |
- r1->use1.b[z] | r1->use2.b[z];
- cal.b[z] &= ~(r1->set.b[z] | r1->use1.b[z] | r1->use2.b[z]);
- r1->refbehind.b[z] = ref.b[z];
- r1->calbehind.b[z] = cal.b[z];
- }
- if(r1->active)
- break;
- r1->active = 1;
- }
- for(; r != r1; r = r->p1)
- for(r2 = r->p2; r2 != R; r2 = r2->p2link)
- prop(r2, r->refbehind, r->calbehind);
-}
-
-/*
- * find looping structure
- *
- * 1) find reverse postordering
- * 2) find approximate dominators,
- * the actual dominators if the flow graph is reducible
- * otherwise, dominators plus some other non-dominators.
- * See Matthew S. Hecht and Jeffrey D. Ullman,
- * "Analysis of a Simple Algorithm for Global Data Flow Problems",
- * Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
- * Oct. 1-3, 1973, pp. 207-217.
- * 3) find all nodes with a predecessor dominated by the current node.
- * such a node is a loop head.
- * recursively, all preds with a greater rpo number are in the loop
- */
-int32
-postorder(Reg *r, Reg **rpo2r, int32 n)
-{
- Reg *r1;
-
- r->rpo = 1;
- r1 = r->s1;
- if(r1 && !r1->rpo)
- n = postorder(r1, rpo2r, n);
- r1 = r->s2;
- if(r1 && !r1->rpo)
- n = postorder(r1, rpo2r, n);
- rpo2r[n] = r;
- n++;
- return n;
-}
-
-int32
-rpolca(int32 *idom, int32 rpo1, int32 rpo2)
-{
- int32 t;
-
- if(rpo1 == -1)
- return rpo2;
- while(rpo1 != rpo2){
- if(rpo1 > rpo2){
- t = rpo2;
- rpo2 = rpo1;
- rpo1 = t;
- }
- while(rpo1 < rpo2){
- t = idom[rpo2];
- if(t >= rpo2)
- fatal(Z, "bad idom");
- rpo2 = t;
- }
- }
- return rpo1;
-}
-
-int
-doms(int32 *idom, int32 r, int32 s)
-{
- while(s > r)
- s = idom[s];
- return s == r;
-}
-
-int
-loophead(int32 *idom, Reg *r)
-{
- int32 src;
-
- src = r->rpo;
- if(r->p1 != R && doms(idom, src, r->p1->rpo))
- return 1;
- for(r = r->p2; r != R; r = r->p2link)
- if(doms(idom, src, r->rpo))
- return 1;
- return 0;
-}
-
-void
-loopmark(Reg **rpo2r, int32 head, Reg *r)
-{
- if(r->rpo < head || r->active == head)
- return;
- r->active = head;
- r->loop += LOOP;
- if(r->p1 != R)
- loopmark(rpo2r, head, r->p1);
- for(r = r->p2; r != R; r = r->p2link)
- loopmark(rpo2r, head, r);
-}
-
-void
-loopit(Reg *r, int32 nr)
-{
- Reg *r1;
- int32 i, d, me;
-
- if(nr > maxnr) {
- rpo2r = alloc(nr * sizeof(Reg*));
- idom = alloc(nr * sizeof(int32));
- maxnr = nr;
- }
-
- d = postorder(r, rpo2r, 0);
- if(d > nr)
- fatal(Z, "too many reg nodes");
- nr = d;
- for(i = 0; i < nr / 2; i++){
- r1 = rpo2r[i];
- rpo2r[i] = rpo2r[nr - 1 - i];
- rpo2r[nr - 1 - i] = r1;
- }
- for(i = 0; i < nr; i++)
- rpo2r[i]->rpo = i;
-
- idom[0] = 0;
- for(i = 0; i < nr; i++){
- r1 = rpo2r[i];
- me = r1->rpo;
- d = -1;
- if(r1->p1 != R && r1->p1->rpo < me)
- d = r1->p1->rpo;
- for(r1 = r1->p2; r1 != nil; r1 = r1->p2link)
- if(r1->rpo < me)
- d = rpolca(idom, d, r1->rpo);
- idom[i] = d;
- }
-
- for(i = 0; i < nr; i++){
- r1 = rpo2r[i];
- r1->loop++;
- if(r1->p2 != R && loophead(idom, r1))
- loopmark(rpo2r, i, r1);
- }
-}
-
-void
-synch(Reg *r, Bits dif)
-{
- Reg *r1;
- int z;
-
- for(r1 = r; r1 != R; r1 = r1->s1) {
- for(z=0; z<BITS; z++) {
- dif.b[z] = (dif.b[z] &
- ~(~r1->refbehind.b[z] & r1->refahead.b[z])) |
- r1->set.b[z] | r1->regdiff.b[z];
- if(dif.b[z] != r1->regdiff.b[z]) {
- r1->regdiff.b[z] = dif.b[z];
- change++;
- }
- }
- if(r1->active)
- break;
- r1->active = 1;
- for(z=0; z<BITS; z++)
- dif.b[z] &= ~(~r1->calbehind.b[z] & r1->calahead.b[z]);
- if(r1->s2 != R)
- synch(r1->s2, dif);
- }
-}
-
-uint32
-allreg(uint32 b, Rgn *r)
-{
- Var *v;
- int i;
-
- v = var + r->varno;
- r->regno = 0;
- switch(v->etype) {
-
- default:
- diag(Z, "unknown etype %d/%d", bitno(b), v->etype);
- break;
-
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TVLONG:
- case TUVLONG:
- case TIND:
- case TARRAY:
- i = BtoR(~b);
- if(i && r->cost > 0) {
- r->regno = i;
- return RtoB(i);
- }
- break;
-
- case TDOUBLE:
- case TFLOAT:
- i = BtoF(~b);
- if(i && r->cost > 0) {
- r->regno = i;
- return FtoB(i);
- }
- break;
- }
- return 0;
-}
-
-void
-paint1(Reg *r, int bn)
-{
- Reg *r1;
- Prog *p;
- int z;
- uint32 bb;
-
- z = bn/32;
- bb = 1L<<(bn%32);
- if(r->act.b[z] & bb)
- return;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(r1->act.b[z] & bb)
- break;
- r = r1;
- }
-
- if(LOAD(r) & ~(r->set.b[z]&~(r->use1.b[z]|r->use2.b[z])) & bb) {
- change -= CLOAD * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\td %B $%d\n", r->loop,
- r->prog, blsh(bn), change);
- }
- for(;;) {
- r->act.b[z] |= bb;
- p = r->prog;
-
- if(r->use1.b[z] & bb) {
- change += CREF * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\tu1 %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if((r->use2.b[z]|r->set.b[z]) & bb) {
- change += CREF * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\tu2 %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if(STORE(r) & r->regdiff.b[z] & bb) {
- change -= CLOAD * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\tst %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- paint1(r1, bn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- paint1(r1, bn);
- r = r->s1;
- if(r == R)
- break;
- if(r->act.b[z] & bb)
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-}
-
-uint32
-regset(Reg *r, uint32 bb)
-{
- uint32 b, set;
- Addr v;
- int c;
-
- set = 0;
- v = zprog.from;
- while(b = bb & ~(bb-1)) {
- v.type = b & 0xFFFF? BtoR(b): BtoF(b);
- if(v.type == 0)
- diag(Z, "zero v.type for %#ux", b);
- c = copyu(r->prog, &v, A);
- if(c == 3)
- set |= b;
- bb &= ~b;
- }
- return set;
-}
-
-uint32
-reguse(Reg *r, uint32 bb)
-{
- uint32 b, set;
- Addr v;
- int c;
-
- set = 0;
- v = zprog.from;
- while(b = bb & ~(bb-1)) {
- v.type = b & 0xFFFF? BtoR(b): BtoF(b);
- c = copyu(r->prog, &v, A);
- if(c == 1 || c == 2 || c == 4)
- set |= b;
- bb &= ~b;
- }
- return set;
-}
-
-uint32
-paint2(Reg *r, int bn)
-{
- Reg *r1;
- int z;
- uint32 bb, vreg, x;
-
- z = bn/32;
- bb = 1L << (bn%32);
- vreg = regbits;
- if(!(r->act.b[z] & bb))
- return vreg;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(!(r1->act.b[z] & bb))
- break;
- r = r1;
- }
- for(;;) {
- r->act.b[z] &= ~bb;
-
- vreg |= r->regu;
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- vreg |= paint2(r1, bn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- vreg |= paint2(r1, bn);
- r = r->s1;
- if(r == R)
- break;
- if(!(r->act.b[z] & bb))
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-
- bb = vreg;
- for(; r; r=r->s1) {
- x = r->regu & ~bb;
- if(x) {
- vreg |= reguse(r, x);
- bb |= regset(r, x);
- }
- }
- return vreg;
-}
-
-void
-paint3(Reg *r, int bn, int32 rb, int rn)
-{
- Reg *r1;
- Prog *p;
- int z;
- uint32 bb;
-
- z = bn/32;
- bb = 1L << (bn%32);
- if(r->act.b[z] & bb)
- return;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(r1->act.b[z] & bb)
- break;
- r = r1;
- }
-
- if(LOAD(r) & ~(r->set.b[z] & ~(r->use1.b[z]|r->use2.b[z])) & bb)
- addmove(r, bn, rn, 0);
- for(;;) {
- r->act.b[z] |= bb;
- p = r->prog;
-
- if(r->use1.b[z] & bb) {
- if(debug['R'])
- print("%P", p);
- addreg(&p->from, rn);
- if(debug['R'])
- print("\t.c%P\n", p);
- }
- if((r->use2.b[z]|r->set.b[z]) & bb) {
- if(debug['R'])
- print("%P", p);
- addreg(&p->to, rn);
- if(debug['R'])
- print("\t.c%P\n", p);
- }
-
- if(STORE(r) & r->regdiff.b[z] & bb)
- addmove(r, bn, rn, 1);
- r->regu |= rb;
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- paint3(r1, bn, rb, rn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- paint3(r1, bn, rb, rn);
- r = r->s1;
- if(r == R)
- break;
- if(r->act.b[z] & bb)
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-}
-
-void
-addreg(Addr *a, int rn)
-{
-
- a->sym = 0;
- a->offset = 0;
- a->type = rn;
-}
-
-int32
-RtoB(int r)
-{
-
- if(r < D_AX || r > D_R15)
- return 0;
- return 1L << (r-D_AX);
-}
-
-int
-BtoR(int32 b)
-{
-
- b &= 0xffffL;
- if(nacl)
- b &= ~((1<<(D_BP-D_AX)) | (1<<(D_R15-D_AX)));
- if(b == 0)
- return 0;
- return bitno(b) + D_AX;
-}
-
-/*
- * bit reg
- * 16 X5
- * 17 X6
- * 18 X7
- */
-int32
-FtoB(int f)
-{
- if(f < FREGMIN || f > FREGEXT)
- return 0;
- return 1L << (f - FREGMIN + 16);
-}
-
-int
-BtoF(int32 b)
-{
-
- b &= 0x70000L;
- if(b == 0)
- return 0;
- return bitno(b) - 16 + FREGMIN;
-}
-
-/* what instruction does a JMP to p eventually land on? */
-static Reg*
-chasejmp(Reg *r, int *jmploop)
-{
- int n;
-
- n = 0;
- for(; r; r=r->s2) {
- if(r->prog->as != AJMP || r->prog->to.type != D_BRANCH)
- break;
- if(++n > 10) {
- *jmploop = 1;
- break;
- }
- }
- return r;
-}
-
-/* mark all code reachable from firstp as alive */
-static void
-mark(Reg *firstr)
-{
- Reg *r;
- Prog *p;
-
- for(r=firstr; r; r=r->link) {
- if(r->active)
- break;
- r->active = 1;
- p = r->prog;
- if(p->as != ACALL && p->to.type == D_BRANCH)
- mark(r->s2);
- if(p->as == AJMP || p->as == ARET || p->as == AUNDEF)
- break;
- }
-}
-
-/*
- * the code generator depends on being able to write out JMP
- * instructions that it can jump to now but fill in later.
- * the linker will resolve them nicely, but they make the code
- * longer and more difficult to follow during debugging.
- * remove them.
- */
-static void
-fixjmp(Reg *firstr)
-{
- int jmploop;
- Reg *r;
- Prog *p;
-
- if(debug['R'] && debug['v'])
- print("\nfixjmp\n");
-
- // pass 1: resolve jump to AJMP, mark all code as dead.
- jmploop = 0;
- for(r=firstr; r; r=r->link) {
- p = r->prog;
- if(debug['R'] && debug['v'])
- print("%04d %P\n", (int)r->pc, p);
- if(p->as != ACALL && p->to.type == D_BRANCH && r->s2 && r->s2->prog->as == AJMP) {
- r->s2 = chasejmp(r->s2, &jmploop);
- p->to.offset = r->s2->pc;
- p->to.u.branch = r->s2->prog;
- if(debug['R'] && debug['v'])
- print("->%P\n", p);
- }
- r->active = 0;
- }
- if(debug['R'] && debug['v'])
- print("\n");
-
- // pass 2: mark all reachable code alive
- mark(firstr);
-
- // pass 3: delete dead code (mostly JMPs).
- for(r=firstr; r; r=r->link) {
- if(!r->active) {
- p = r->prog;
- if(p->link == P && p->as == ARET && r->p1 && r->p1->prog->as != ARET) {
- // This is the final ARET, and the code so far doesn't have one.
- // Let it stay.
- } else {
- if(debug['R'] && debug['v'])
- print("del %04d %P\n", (int)r->pc, p);
- p->as = ANOP;
- }
- }
- }
-
- // pass 4: elide JMP to next instruction.
- // only safe if there are no jumps to JMPs anymore.
- if(!jmploop) {
- for(r=firstr; r; r=r->link) {
- p = r->prog;
- if(p->as == AJMP && p->to.type == D_BRANCH && r->s2 == r->link) {
- if(debug['R'] && debug['v'])
- print("del %04d %P\n", (int)r->pc, p);
- p->as = ANOP;
- }
- }
- }
-
- // fix back pointers.
- for(r=firstr; r; r=r->link) {
- r->p2 = R;
- r->p2link = R;
- }
- for(r=firstr; r; r=r->link) {
- if(r->s2) {
- r->p2link = r->s2->p2;
- r->s2->p2 = r;
- }
- }
-
- if(debug['R'] && debug['v']) {
- print("\n");
- for(r=firstr; r; r=r->link)
- print("%04d %P\n", (int)r->pc, r->prog);
- print("\n");
- }
-}
-
diff --git a/src/cmd/6c/sgen.c b/src/cmd/6c/sgen.c
deleted file mode 100644
index fceb332b2..000000000
--- a/src/cmd/6c/sgen.c
+++ /dev/null
@@ -1,483 +0,0 @@
-// Inferno utils/6c/sgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/sgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-#include "../../runtime/funcdata.h"
-
-Prog*
-gtext(Sym *s, int32 stkoff)
-{
- vlong v;
-
- v = ((uvlong)argsize(1) << 32) | (stkoff & 0xffffffff);
- if((textflag & NOSPLIT) && stkoff >= 128)
- yyerror("stack frame too large for NOSPLIT function");
-
- gpseudo(ATEXT, s, nodgconst(v, types[TVLONG]));
- return p;
-}
-
-void
-noretval(int n)
-{
-
- if(n & 1) {
- gins(ANOP, Z, Z);
- p->to.type = REGRET;
- }
- if(n & 2) {
- gins(ANOP, Z, Z);
- p->to.type = FREGRET;
- }
-}
-
-/* welcome to commute */
-static void
-commute(Node *n)
-{
- Node *l, *r;
-
- l = n->left;
- r = n->right;
- if(r->complex > l->complex) {
- n->left = r;
- n->right = l;
- }
-}
-
-void
-indexshift(Node *n)
-{
- int g;
-
- if(!typechlpv[n->type->etype])
- return;
- simplifyshift(n);
- if(n->op == OASHL && n->right->op == OCONST){
- g = vconst(n->right);
- if(g >= 0 && g <= 3)
- n->addable = 7;
- }
-}
-
-/*
- * calculate addressability as follows
- * NAME ==> 10/11 name+value(SB/SP)
- * REGISTER ==> 12 register
- * CONST ==> 20 $value
- * *(20) ==> 21 value
- * &(10) ==> 13 $name+value(SB)
- * &(11) ==> 1 $name+value(SP)
- * (13) + (20) ==> 13 fold constants
- * (1) + (20) ==> 1 fold constants
- * *(13) ==> 10 back to name
- * *(1) ==> 11 back to name
- *
- * (20) * (X) ==> 7 multiplier in indexing
- * (X,7) + (13,1) ==> 8 adder in indexing (addresses)
- * (8) ==> &9(OINDEX) index, almost addressable
- *
- * calculate complexity (number of registers)
- */
-void
-xcom(Node *n)
-{
- Node *l, *r;
- int g;
-
- if(n == Z)
- return;
- l = n->left;
- r = n->right;
- n->complex = 0;
- n->addable = 0;
- switch(n->op) {
- case OCONST:
- n->addable = 20;
- break;
-
- case ONAME:
- n->addable = 9;
- if(n->class == CPARAM || n->class == CAUTO)
- n->addable = 11;
- break;
-
- case OEXREG:
- n->addable = 0;
- break;
-
- case OREGISTER:
- n->addable = 12;
- break;
-
- case OINDREG:
- n->addable = 12;
- break;
-
- case OADDR:
- xcom(l);
- if(l->addable == 10)
- n->addable = 13;
- else
- if(l->addable == 11)
- n->addable = 1;
- break;
-
- case OADD:
- xcom(l);
- xcom(r);
- if(n->type->etype != TIND)
- break;
-
- switch(r->addable) {
- case 20:
- switch(l->addable) {
- case 1:
- case 13:
- commadd:
- l->type = n->type;
- *n = *l;
- l = new(0, Z, Z);
- *l = *(n->left);
- l->xoffset += r->vconst;
- n->left = l;
- r = n->right;
- goto brk;
- }
- break;
-
- case 1:
- case 13:
- case 10:
- case 11:
- /* l is the base, r is the index */
- if(l->addable != 20)
- n->addable = 8;
- break;
- }
- switch(l->addable) {
- case 20:
- switch(r->addable) {
- case 13:
- case 1:
- r = n->left;
- l = n->right;
- n->left = l;
- n->right = r;
- goto commadd;
- }
- break;
-
- case 13:
- case 1:
- case 10:
- case 11:
- /* r is the base, l is the index */
- if(r->addable != 20)
- n->addable = 8;
- break;
- }
- if(n->addable == 8 && !side(n) && !nacl) {
- indx(n);
- l = new1(OINDEX, idx.basetree, idx.regtree);
- l->scale = idx.scale;
- l->addable = 9;
- l->complex = l->right->complex;
- l->type = l->left->type;
- n->op = OADDR;
- n->left = l;
- n->right = Z;
- n->addable = 8;
- break;
- }
- break;
-
- case OINDEX:
- xcom(l);
- xcom(r);
- n->addable = 9;
- break;
-
- case OIND:
- xcom(l);
- if(l->op == OADDR) {
- l = l->left;
- l->type = n->type;
- *n = *l;
- return;
- }
- switch(l->addable) {
- case 20:
- n->addable = 21;
- break;
- case 1:
- n->addable = 11;
- break;
- case 13:
- n->addable = 10;
- break;
- }
- break;
-
- case OASHL:
- xcom(l);
- xcom(r);
- indexshift(n);
- break;
-
- case OMUL:
- case OLMUL:
- xcom(l);
- xcom(r);
- g = vlog(l);
- if(g >= 0) {
- n->left = r;
- n->right = l;
- l = r;
- r = n->right;
- }
- g = vlog(r);
- if(g >= 0) {
- n->op = OASHL;
- r->vconst = g;
- r->type = types[TINT];
- indexshift(n);
- break;
- }
- commute(n);
- break;
-
- case OASLDIV:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OASLSHR;
- r->vconst = g;
- r->type = types[TINT];
- }
- break;
-
- case OLDIV:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OLSHR;
- r->vconst = g;
- r->type = types[TINT];
- indexshift(n);
- break;
- }
- break;
-
- case OASLMOD:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OASAND;
- r->vconst--;
- }
- break;
-
- case OLMOD:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OAND;
- r->vconst--;
- }
- break;
-
- case OASMUL:
- case OASLMUL:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OASASHL;
- r->vconst = g;
- }
- break;
-
- case OLSHR:
- case OASHR:
- xcom(l);
- xcom(r);
- indexshift(n);
- break;
-
- default:
- if(l != Z)
- xcom(l);
- if(r != Z)
- xcom(r);
- break;
- }
-brk:
- if(n->addable >= 10)
- return;
- if(l != Z)
- n->complex = l->complex;
- if(r != Z) {
- if(r->complex == n->complex)
- n->complex = r->complex+1;
- else
- if(r->complex > n->complex)
- n->complex = r->complex;
- }
- if(n->complex == 0)
- n->complex++;
-
- switch(n->op) {
-
- case OFUNC:
- n->complex = FNX;
- break;
-
- case OCAST:
- if(l->type->etype == TUVLONG && typefd[n->type->etype])
- n->complex += 2;
- break;
-
- case OLMOD:
- case OMOD:
- case OLMUL:
- case OLDIV:
- case OMUL:
- case ODIV:
- case OASLMUL:
- case OASLDIV:
- case OASLMOD:
- case OASMUL:
- case OASDIV:
- case OASMOD:
- if(r->complex >= l->complex) {
- n->complex = l->complex + 3;
- if(r->complex > n->complex)
- n->complex = r->complex;
- } else {
- n->complex = r->complex + 3;
- if(l->complex > n->complex)
- n->complex = l->complex;
- }
- break;
-
- case OLSHR:
- case OASHL:
- case OASHR:
- case OASLSHR:
- case OASASHL:
- case OASASHR:
- if(r->complex >= l->complex) {
- n->complex = l->complex + 2;
- if(r->complex > n->complex)
- n->complex = r->complex;
- } else {
- n->complex = r->complex + 2;
- if(l->complex > n->complex)
- n->complex = l->complex;
- }
- break;
-
- case OADD:
- case OXOR:
- case OAND:
- case OOR:
- /*
- * immediate operators, make const on right
- */
- if(l->op == OCONST) {
- n->left = r;
- n->right = l;
- }
- break;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- /*
- * compare operators, make const on left
- */
- if(r->op == OCONST) {
- n->left = r;
- n->right = l;
- n->op = invrel[relindex(n->op)];
- }
- break;
- }
-}
-
-void
-indx(Node *n)
-{
- Node *l, *r;
-
- if(debug['x'])
- prtree(n, "indx");
-
- l = n->left;
- r = n->right;
- if(l->addable == 1 || l->addable == 13 || r->complex > l->complex) {
- n->right = l;
- n->left = r;
- l = r;
- r = n->right;
- }
- if(l->addable != 7) {
- idx.regtree = l;
- idx.scale = 1;
- } else
- if(l->right->addable == 20) {
- idx.regtree = l->left;
- idx.scale = 1 << l->right->vconst;
- } else
- if(l->left->addable == 20) {
- idx.regtree = l->right;
- idx.scale = 1 << l->left->vconst;
- } else
- diag(n, "bad index");
-
- idx.basetree = r;
- if(debug['x']) {
- print("scale = %d\n", idx.scale);
- prtree(idx.regtree, "index");
- prtree(idx.basetree, "base");
- }
-}
diff --git a/src/cmd/6c/swt.c b/src/cmd/6c/swt.c
deleted file mode 100644
index 6e918eb10..000000000
--- a/src/cmd/6c/swt.c
+++ /dev/null
@@ -1,353 +0,0 @@
-// Inferno utils/6c/swt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/swt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-void
-swit1(C1 *q, int nc, int32 def, Node *n)
-{
- Node nreg;
-
- regalloc(&nreg, n, Z);
- if(typev[n->type->etype])
- nreg.type = types[TVLONG];
- else
- nreg.type = types[TLONG];
- cgen(n, &nreg);
- swit2(q, nc, def, &nreg);
- regfree(&nreg);
-}
-
-void
-swit2(C1 *q, int nc, int32 def, Node *n)
-{
- C1 *r;
- int i;
- Prog *sp;
-
- if(nc < 5) {
- for(i=0; i<nc; i++) {
- if(debug['W'])
- print("case = %.8llux\n", q->val);
- gcmp(OEQ, n, q->val);
- patch(p, q->label);
- q++;
- }
- gbranch(OGOTO);
- patch(p, def);
- return;
- }
- i = nc / 2;
- r = q+i;
- if(debug['W'])
- print("case > %.8llux\n", r->val);
- gcmp(OGT, n, r->val);
- sp = p;
- gbranch(OGOTO);
- p->as = AJEQ;
- patch(p, r->label);
- swit2(q, i, def, n);
-
- if(debug['W'])
- print("case < %.8llux\n", r->val);
- patch(sp, pc);
- swit2(r+1, nc-i-1, def, n);
-}
-
-void
-bitload(Node *b, Node *n1, Node *n2, Node *n3, Node *nn)
-{
- int sh;
- int32 v;
- Node *l;
-
- /*
- * n1 gets adjusted/masked value
- * n2 gets address of cell
- * n3 gets contents of cell
- */
- l = b->left;
- if(n2 != Z) {
- regalloc(n1, l, nn);
- reglcgen(n2, l, Z);
- regalloc(n3, l, Z);
- gmove(n2, n3);
- gmove(n3, n1);
- } else {
- regalloc(n1, l, nn);
- cgen(l, n1);
- }
- if(b->type->shift == 0 && typeu[b->type->etype]) {
- v = ~0 + (1L << b->type->nbits);
- gopcode(OAND, tfield, nodconst(v), n1);
- } else {
- sh = 32 - b->type->shift - b->type->nbits;
- if(sh > 0)
- gopcode(OASHL, tfield, nodconst(sh), n1);
- sh += b->type->shift;
- if(sh > 0)
- if(typeu[b->type->etype])
- gopcode(OLSHR, tfield, nodconst(sh), n1);
- else
- gopcode(OASHR, tfield, nodconst(sh), n1);
- }
-}
-
-void
-bitstore(Node *b, Node *n1, Node *n2, Node *n3, Node *nn)
-{
- int32 v;
- Node nod;
- int sh;
-
- regalloc(&nod, b->left, Z);
- v = ~0 + (1L << b->type->nbits);
- gopcode(OAND, types[TLONG], nodconst(v), n1);
- gmove(n1, &nod);
- if(nn != Z)
- gmove(n1, nn);
- sh = b->type->shift;
- if(sh > 0)
- gopcode(OASHL, types[TLONG], nodconst(sh), &nod);
- v <<= sh;
- gopcode(OAND, types[TLONG], nodconst(~v), n3);
- gopcode(OOR, types[TLONG], n3, &nod);
- gmove(&nod, n2);
-
- regfree(&nod);
- regfree(n1);
- regfree(n2);
- regfree(n3);
-}
-
-int32
-outstring(char *s, int32 n)
-{
- int32 r;
-
- if(suppress)
- return nstring;
- r = nstring;
- while(n) {
- string[mnstring] = *s++;
- mnstring++;
- nstring++;
- if(mnstring >= NSNAME) {
- gpseudo(ADATA, symstring, nodconst(0L));
- p->from.offset += nstring - NSNAME;
- p->from.scale = NSNAME;
- p->to.type = D_SCONST;
- memmove(p->to.u.sval, string, NSNAME);
- mnstring = 0;
- }
- n--;
- }
- return r;
-}
-
-void
-sextern(Sym *s, Node *a, int32 o, int32 w)
-{
- int32 e, lw;
-
- for(e=0; e<w; e+=NSNAME) {
- lw = NSNAME;
- if(w-e < lw)
- lw = w-e;
- gpseudo(ADATA, s, nodconst(0L));
- p->from.offset += o+e;
- p->from.scale = lw;
- p->to.type = D_SCONST;
- memmove(p->to.u.sval, a->cstring+e, lw);
- }
-}
-
-void
-gextern(Sym *s, Node *a, int32 o, int32 w)
-{
- if(0 && a->op == OCONST && typev[a->type->etype]) {
- gpseudo(ADATA, s, lo64(a));
- p->from.offset += o;
- p->from.scale = 4;
- gpseudo(ADATA, s, hi64(a));
- p->from.offset += o + 4;
- p->from.scale = 4;
- return;
- }
- gpseudo(ADATA, s, a);
- p->from.offset += o;
- p->from.scale = w;
- switch(p->to.type) {
- default:
- p->to.index = p->to.type;
- p->to.type = D_ADDR;
- case D_CONST:
- case D_FCONST:
- case D_ADDR:
- break;
- }
-}
-
-void
-outcode(void)
-{
- int f;
- Biobuf b;
-
- f = open(outfile, OWRITE);
- if(f < 0) {
- diag(Z, "cannot open %s", outfile);
- return;
- }
- Binit(&b, f, OWRITE);
-
- Bprint(&b, "go object %s %s %s\n", getgoos(), getgoarch(), getgoversion());
- if(pragcgobuf.to > pragcgobuf.start) {
- Bprint(&b, "\n");
- Bprint(&b, "$$ // exports\n\n");
- Bprint(&b, "$$ // local types\n\n");
- Bprint(&b, "$$ // cgo\n");
- Bprint(&b, "%s", fmtstrflush(&pragcgobuf));
- Bprint(&b, "\n$$\n\n");
- }
- Bprint(&b, "!\n");
-
- writeobj(ctxt, &b);
- Bterm(&b);
- close(f);
- lastp = P;
-}
-
-int32
-align(int32 i, Type *t, int op, int32 *maxalign)
-{
- int32 o;
- Type *v;
- int w, packw;
-
- o = i;
- w = 1;
- packw = 0;
- switch(op) {
- default:
- diag(Z, "unknown align opcode %d", op);
- break;
-
- case Asu2: /* padding at end of a struct */
- w = *maxalign;
- if(w < 1)
- w = 1;
- if(packflg)
- packw = packflg;
- break;
-
- case Ael1: /* initial align of struct element */
- for(v=t; v->etype==TARRAY; v=v->link)
- ;
- if(v->etype == TSTRUCT || v->etype == TUNION)
- w = v->align;
- else
- w = ewidth[v->etype];
- if(w < 1 || w > SZ_VLONG)
- fatal(Z, "align");
- if(packflg)
- packw = packflg;
- break;
-
- case Ael2: /* width of a struct element */
- o += t->width;
- break;
-
- case Aarg0: /* initial passbyptr argument in arg list */
- if(typesu[t->etype]) {
- o = align(o, types[TIND], Aarg1, nil);
- o = align(o, types[TIND], Aarg2, nil);
- }
- break;
-
- case Aarg1: /* initial align of parameter */
- if(ewidth[TIND] == 4) {
- if(typesu[t->etype]) {
- for(v = t->link; v != T; v = v->down)
- o = align(o, v, Aarg1, maxalign);
- goto out;
- }
- w = ewidth[t->etype];
- if(typev[t->etype] || t->etype == TDOUBLE)
- w = 8;
- else if(w <= 0 || w >= 4)
- w = 4;
- else
- w = 1;
- break;
- }
- w = ewidth[t->etype];
- if(w <= 0 || w >= SZ_VLONG) {
- w = SZ_VLONG;
- break;
- }
- w = 1; /* little endian no adjustment */
- break;
-
- case Aarg2: /* width of a parameter */
- o += t->width;
- if(ewidth[TIND] == 4) {
- o = align(o, t, Aarg1, maxalign);
- goto out;
- }
- w = t->width;
- if(w > SZ_VLONG)
- w = SZ_VLONG;
- break;
-
- case Aaut3: /* total align of automatic */
- o = align(o, t, Ael1, nil);
- o = align(o, t, Ael2, nil);
- break;
- }
- if(packw != 0 && xround(o, w) != xround(o, packw))
- diag(Z, "#pragma pack changes offset of %T", t);
- o = xround(o, w);
- if(maxalign && *maxalign < w)
- *maxalign = w;
-out:
- if(debug['A'])
- print("align %s %d %T = %d\n", bnames[op], i, t, o);
- return o;
-}
-
-int32
-maxround(int32 max, int32 v)
-{
- v = xround(v, SZ_VLONG);
- if(v > max)
- return v;
- return max;
-}
diff --git a/src/cmd/6c/txt.c b/src/cmd/6c/txt.c
deleted file mode 100644
index 3bdbf410e..000000000
--- a/src/cmd/6c/txt.c
+++ /dev/null
@@ -1,1674 +0,0 @@
-// Inferno utils/6c/txt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-int thechar = '6';
-char *thestring = "amd64";
-
-LinkArch *thelinkarch = &linkamd64;
-
-void
-linkarchinit(void)
-{
- if(strcmp(getgoarch(), "amd64p32") == 0)
- thelinkarch = &linkamd64p32;
-}
-
-void
-ginit(void)
-{
- int i;
- Type *t;
-
- dodefine("_64BITREG");
- if(ewidth[TIND] == 8)
- dodefine("_64BIT");
- listinit();
- nstring = 0;
- mnstring = 0;
- nrathole = 0;
- pc = 0;
- breakpc = -1;
- continpc = -1;
- cases = C;
- lastp = P;
- tfield = types[TINT];
-
- typeword = typechlvp;
- typecmplx = typesu;
-
- /* TO DO */
- memmove(typechlpv, typechlp, sizeof(typechlpv));
- typechlpv[TVLONG] = 1;
- typechlpv[TUVLONG] = 1;
-
- zprog.link = P;
- zprog.as = AGOK;
- zprog.from.type = D_NONE;
- zprog.from.index = D_NONE;
- zprog.from.scale = 0;
- zprog.to = zprog.from;
-
- lregnode.op = OREGISTER;
- lregnode.class = CEXREG;
- lregnode.reg = REGTMP;
- lregnode.complex = 0;
- lregnode.addable = 11;
- lregnode.type = types[TLONG];
-
- qregnode = lregnode;
- qregnode.type = types[TVLONG];
-
- constnode.op = OCONST;
- constnode.class = CXXX;
- constnode.complex = 0;
- constnode.addable = 20;
- constnode.type = types[TLONG];
-
- vconstnode = constnode;
- vconstnode.type = types[TVLONG];
-
- fconstnode.op = OCONST;
- fconstnode.class = CXXX;
- fconstnode.complex = 0;
- fconstnode.addable = 20;
- fconstnode.type = types[TDOUBLE];
-
- nodsafe = new(ONAME, Z, Z);
- nodsafe->sym = slookup(".safe");
- nodsafe->type = types[TINT];
- nodsafe->etype = types[TINT]->etype;
- nodsafe->class = CAUTO;
- complex(nodsafe);
-
- t = typ(TARRAY, types[TCHAR]);
- symrathole = slookup(".rathole");
- symrathole->class = CGLOBL;
- symrathole->type = t;
-
- nodrat = new(ONAME, Z, Z);
- nodrat->sym = symrathole;
- nodrat->type = types[TIND];
- nodrat->etype = TVOID;
- nodrat->class = CGLOBL;
- complex(nodrat);
- nodrat->type = t;
-
- nodret = new(ONAME, Z, Z);
- nodret->sym = slookup(".ret");
- nodret->type = types[TIND];
- nodret->etype = TIND;
- nodret->class = CPARAM;
- nodret = new(OIND, nodret, Z);
- complex(nodret);
-
- if(0)
- com64init();
-
- for(i=0; i<nelem(reg); i++) {
- reg[i] = 1;
- if(i >= D_AX && i <= D_R15 && i != D_SP)
- reg[i] = 0;
- if(i >= D_X0 && i <= D_X7)
- reg[i] = 0;
- }
- if(nacl) {
- reg[D_BP] = 1;
- reg[D_R15] = 1;
- }
-}
-
-void
-gclean(void)
-{
- int i;
- Sym *s;
-
- reg[D_SP]--;
- if(nacl) {
- reg[D_BP]--;
- reg[D_R15]--;
- }
- for(i=D_AX; i<=D_R15; i++)
- if(reg[i])
- diag(Z, "reg %R left allocated", i);
- for(i=D_X0; i<=D_X7; i++)
- if(reg[i])
- diag(Z, "reg %R left allocated", i);
- while(mnstring)
- outstring("", 1L);
- symstring->type->width = nstring;
- symrathole->type->width = nrathole;
- for(i=0; i<NHASH; i++)
- for(s = hash[i]; s != S; s = s->link) {
- if(s->type == T)
- continue;
- if(s->type->width == 0)
- continue;
- if(s->class != CGLOBL && s->class != CSTATIC)
- continue;
- if(s->type == types[TENUM])
- continue;
- gpseudo(AGLOBL, s, nodconst(s->type->width));
- }
- nextpc();
- p->as = AEND;
- outcode();
-}
-
-void
-nextpc(void)
-{
- Plist *pl;
-
- p = alloc(sizeof(*p));
- *p = zprog;
- p->lineno = nearln;
- p->pc = pc;
- pc++;
- if(lastp == nil) {
- pl = linknewplist(ctxt);
- pl->firstpc = p;
- } else
- lastp->link = p;
- lastp = p;
-}
-
-void
-gargs(Node *n, Node *tn1, Node *tn2)
-{
- int32 regs;
- Node fnxargs[20], *fnxp;
-
- regs = cursafe;
-
- fnxp = fnxargs;
- garg1(n, tn1, tn2, 0, &fnxp); /* compile fns to temps */
-
- curarg = 0;
- fnxp = fnxargs;
- garg1(n, tn1, tn2, 1, &fnxp); /* compile normal args and temps */
-
- cursafe = regs;
-}
-
-int
-nareg(void)
-{
- int i, n;
-
- n = 0;
- for(i=D_AX; i<=D_R15; i++)
- if(reg[i] == 0)
- n++;
- return n;
-}
-
-void
-garg1(Node *n, Node *tn1, Node *tn2, int f, Node **fnxp)
-{
- Node nod;
-
- if(n == Z)
- return;
- if(n->op == OLIST) {
- garg1(n->left, tn1, tn2, f, fnxp);
- garg1(n->right, tn1, tn2, f, fnxp);
- return;
- }
- if(f == 0) {
- if(n->complex >= FNX) {
- regsalloc(*fnxp, n);
- nod = znode;
- nod.op = OAS;
- nod.left = *fnxp;
- nod.right = n;
- nod.type = n->type;
- cgen(&nod, Z);
- (*fnxp)++;
- }
- return;
- }
- if(typesu[n->type->etype]) {
- regaalloc(tn2, n);
- if(n->complex >= FNX) {
- sugen(*fnxp, tn2, n->type->width);
- (*fnxp)++;
- } else
- sugen(n, tn2, n->type->width);
- return;
- }
- if(REGARG >= 0 && curarg == 0 && typechlpv[n->type->etype]) {
- regaalloc1(tn1, n);
- if(n->complex >= FNX) {
- cgen(*fnxp, tn1);
- (*fnxp)++;
- } else
- cgen(n, tn1);
- return;
- }
- if(vconst(n) == 0) {
- regaalloc(tn2, n);
- gmove(n, tn2);
- return;
- }
- regalloc(tn1, n, Z);
- if(n->complex >= FNX) {
- cgen(*fnxp, tn1);
- (*fnxp)++;
- } else
- cgen(n, tn1);
- regaalloc(tn2, n);
- gmove(tn1, tn2);
- regfree(tn1);
-}
-
-Node*
-nodgconst(vlong v, Type *t)
-{
- if(!typev[t->etype])
- return nodconst((int32)v);
- vconstnode.vconst = v;
- return &vconstnode;
-}
-
-Node*
-nodconst(int32 v)
-{
- constnode.vconst = v;
- return &constnode;
-}
-
-Node*
-nodfconst(double d)
-{
- fconstnode.fconst = d;
- return &fconstnode;
-}
-
-int
-isreg(Node *n, int r)
-{
-
- if(n->op == OREGISTER)
- if(n->reg == r)
- return 1;
- return 0;
-}
-
-int
-nodreg(Node *n, Node *nn, int r)
-{
- int et;
-
- *n = qregnode;
- n->reg = r;
- if(nn != Z){
- et = nn->type->etype;
- if(!typefd[et] && nn->type->width <= SZ_LONG && 0)
- n->type = typeu[et]? types[TUINT]: types[TINT];
- else
- n->type = nn->type;
-//print("nodreg %s [%s]\n", tnames[et], tnames[n->type->etype]);
- n->lineno = nn->lineno;
- }
- if(reg[r] == 0)
- return 0;
- if(nn != Z) {
- if(nn->op == OREGISTER)
- if(nn->reg == r)
- return 0;
- }
- return 1;
-}
-
-void
-regret(Node *n, Node *nn, Type *t, int mode)
-{
- int r;
-
- if(mode == 0 || hasdotdotdot(t) || nn->type->width == 0) {
- r = REGRET;
- if(typefd[nn->type->etype])
- r = FREGRET;
- nodreg(n, nn, r);
- reg[r]++;
- return;
- }
-
- if(mode == 1) {
- // fetch returned value after call.
- // already called gargs, so curarg is set.
- curarg = (curarg+7) & ~7;
- regaalloc(n, nn);
- return;
- }
-
- if(mode == 2) {
- // store value to be returned.
- // must compute arg offset.
- if(t->etype != TFUNC)
- fatal(Z, "bad regret func %T", t);
- *n = *nn;
- n->op = ONAME;
- n->class = CPARAM;
- n->sym = slookup(".ret");
- n->complex = nodret->complex;
- n->addable = 20;
- n->xoffset = argsize(0);
- return;
- }
-
- fatal(Z, "bad regret");
-}
-
-void
-regalloc(Node *n, Node *tn, Node *o)
-{
- int i;
-
- switch(tn->type->etype) {
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TVLONG:
- case TUVLONG:
- case TIND:
- if(o != Z && o->op == OREGISTER) {
- i = o->reg;
- if(i >= D_AX && i <= D_R15)
- goto out;
- }
- for(i=D_AX; i<=D_R15; i++)
- if(reg[i] == 0)
- goto out;
- diag(tn, "out of fixed registers");
- goto err;
-
- case TFLOAT:
- case TDOUBLE:
- if(o != Z && o->op == OREGISTER) {
- i = o->reg;
- if(i >= D_X0 && i <= D_X7)
- goto out;
- }
- for(i=D_X0; i<=D_X7; i++)
- if(reg[i] == 0)
- goto out;
- diag(tn, "out of float registers");
- goto out;
- }
- diag(tn, "unknown type in regalloc: %T", tn->type);
-err:
- i = 0;
-out:
- if(i)
- reg[i]++;
- nodreg(n, tn, i);
-}
-
-void
-regialloc(Node *n, Node *tn, Node *o)
-{
- Node nod;
-
- nod = *tn;
- nod.type = types[TIND];
- regalloc(n, &nod, o);
-}
-
-void
-regfree(Node *n)
-{
- int i;
-
- i = 0;
- if(n->op != OREGISTER && n->op != OINDREG)
- goto err;
- i = n->reg;
- if(i < 0 || i >= nelem(reg))
- goto err;
- if(reg[i] <= 0)
- goto err;
- reg[i]--;
- return;
-err:
- diag(n, "error in regfree: %R", i);
-}
-
-void
-regsalloc(Node *n, Node *nn)
-{
- cursafe = align(cursafe, nn->type, Aaut3, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
- *n = *nodsafe;
- n->xoffset = -(stkoff + cursafe);
- n->type = nn->type;
- n->etype = nn->type->etype;
- n->lineno = nn->lineno;
-}
-
-void
-regaalloc1(Node *n, Node *nn)
-{
- if(REGARG < 0) {
- fatal(n, "regaalloc1 and REGARG<0");
- return;
- }
- nodreg(n, nn, REGARG);
- reg[(uchar)REGARG]++;
- curarg = align(curarg, nn->type, Aarg1, nil);
- curarg = align(curarg, nn->type, Aarg2, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
-}
-
-void
-regaalloc(Node *n, Node *nn)
-{
- curarg = align(curarg, nn->type, Aarg1, nil);
- *n = *nn;
- n->op = OINDREG;
- n->reg = REGSP;
- n->xoffset = curarg;
- n->complex = 0;
- n->addable = 20;
- curarg = align(curarg, nn->type, Aarg2, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
-}
-
-void
-regind(Node *n, Node *nn)
-{
-
- if(n->op != OREGISTER) {
- diag(n, "regind not OREGISTER");
- return;
- }
- n->op = OINDREG;
- n->type = nn->type;
-}
-
-void
-naddr(Node *n, Addr *a)
-{
- int32 v;
-
- a->type = D_NONE;
- if(n == Z)
- return;
- switch(n->op) {
- default:
- bad:
- diag(n, "bad in naddr: %O %D", n->op, a);
- break;
-
- case OREGISTER:
- a->type = n->reg;
- a->sym = nil;
- break;
-
- case OEXREG:
- a->type = D_INDIR + D_TLS;
- a->offset = n->reg - 1;
- break;
-
- case OIND:
- naddr(n->left, a);
- if(a->type >= D_AX && a->type <= D_R15)
- a->type += D_INDIR;
- else
- if(a->type == D_CONST)
- a->type = D_NONE+D_INDIR;
- else
- if(a->type == D_ADDR) {
- a->type = a->index;
- a->index = D_NONE;
- } else
- goto bad;
- break;
-
- case OINDEX:
- a->type = idx.ptr;
- if(n->left->op == OADDR || n->left->op == OCONST)
- naddr(n->left, a);
- if(a->type >= D_AX && a->type <= D_R15)
- a->type += D_INDIR;
- else
- if(a->type == D_CONST)
- a->type = D_NONE+D_INDIR;
- else
- if(a->type == D_ADDR) {
- a->type = a->index;
- a->index = D_NONE;
- } else
- goto bad;
- a->index = idx.reg;
- a->scale = n->scale;
- a->offset += n->xoffset;
- break;
-
- case OINDREG:
- a->type = n->reg+D_INDIR;
- a->sym = nil;
- a->offset = n->xoffset;
- break;
-
- case ONAME:
- a->etype = n->etype;
- a->type = D_STATIC;
- a->sym = linksym(n->sym);
- a->offset = n->xoffset;
- if(n->class == CSTATIC)
- break;
- if(n->class == CEXTERN || n->class == CGLOBL) {
- a->type = D_EXTERN;
- break;
- }
- if(n->class == CAUTO) {
- a->type = D_AUTO;
- break;
- }
- if(n->class == CPARAM) {
- a->type = D_PARAM;
- break;
- }
- goto bad;
-
- case OCONST:
- if(typefd[n->type->etype]) {
- a->type = D_FCONST;
- a->u.dval = n->fconst;
- break;
- }
- a->sym = nil;
- a->type = D_CONST;
- if(typev[n->type->etype] || (n->type->etype == TIND && ewidth[TIND] == 8))
- a->offset = n->vconst;
- else
- a->offset = convvtox(n->vconst, typeu[n->type->etype]? TULONG: TLONG);
- break;
-
- case OADDR:
- naddr(n->left, a);
- if(a->type >= D_INDIR) {
- a->type -= D_INDIR;
- break;
- }
- if(a->type == D_EXTERN || a->type == D_STATIC ||
- a->type == D_AUTO || a->type == D_PARAM)
- if(a->index == D_NONE) {
- a->index = a->type;
- a->type = D_ADDR;
- break;
- }
- goto bad;
-
- case OADD:
- if(n->right->op == OCONST) {
- v = n->right->vconst;
- naddr(n->left, a);
- } else
- if(n->left->op == OCONST) {
- v = n->left->vconst;
- naddr(n->right, a);
- } else
- goto bad;
- a->offset += v;
- break;
-
- }
-}
-
-void
-gcmp(int op, Node *n, vlong val)
-{
- Node *cn, nod;
-
- cn = nodgconst(val, n->type);
- if(!immconst(cn)){
- regalloc(&nod, n, Z);
- gmove(cn, &nod);
- gopcode(op, n->type, n, &nod);
- regfree(&nod);
- }else
- gopcode(op, n->type, n, cn);
-}
-
-#define CASE(a,b) ((a<<8)|(b<<0))
-
-void
-gmove(Node *f, Node *t)
-{
- int ft, tt, t64, a;
- Node nod, nod1, nod2, nod3;
- Prog *p1, *p2;
-
- ft = f->type->etype;
- tt = t->type->etype;
- if(ewidth[TIND] == 4) {
- if(ft == TIND)
- ft = TUINT;
- if(tt == TIND)
- tt = TUINT;
- }
- t64 = tt == TVLONG || tt == TUVLONG || tt == TIND;
- if(debug['M'])
- print("gop: %O %O[%s],%O[%s]\n", OAS,
- f->op, tnames[ft], t->op, tnames[tt]);
- if(typefd[ft] && f->op == OCONST) {
- /* TO DO: pick up special constants, possibly preloaded */
- if(f->fconst == 0.0){
- regalloc(&nod, t, t);
- gins(AXORPD, &nod, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
- }
- }
-/*
- * load
- */
- if(ft == TVLONG || ft == TUVLONG)
- if(f->op == OCONST)
- if(f->vconst > 0x7fffffffLL || f->vconst < -0x7fffffffLL)
- if(t->op != OREGISTER) {
- regalloc(&nod, f, Z);
- gmove(f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
- }
-
- if(f->op == ONAME || f->op == OINDREG ||
- f->op == OIND || f->op == OINDEX)
- switch(ft) {
- case TCHAR:
- a = AMOVBLSX;
- if(t64)
- a = AMOVBQSX;
- goto ld;
- case TUCHAR:
- a = AMOVBLZX;
- if(t64)
- a = AMOVBQZX;
- goto ld;
- case TSHORT:
- a = AMOVWLSX;
- if(t64)
- a = AMOVWQSX;
- goto ld;
- case TUSHORT:
- a = AMOVWLZX;
- if(t64)
- a = AMOVWQZX;
- goto ld;
- case TINT:
- case TLONG:
- if(typefd[tt]) {
- regalloc(&nod, t, t);
- if(tt == TDOUBLE)
- a = ACVTSL2SD;
- else
- a = ACVTSL2SS;
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
- }
- a = AMOVL;
- if(t64)
- a = AMOVLQSX;
- goto ld;
- case TUINT:
- case TULONG:
- a = AMOVL;
- if(t64)
- a = AMOVLQZX; /* could probably use plain MOVL */
- goto ld;
- case TVLONG:
- if(typefd[tt]) {
- regalloc(&nod, t, t);
- if(tt == TDOUBLE)
- a = ACVTSQ2SD;
- else
- a = ACVTSQ2SS;
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
- }
- case TUVLONG:
- a = AMOVQ;
- goto ld;
- case TIND:
- a = AMOVQ;
- if(ewidth[TIND] == 4)
- a = AMOVL;
-
- ld:
- regalloc(&nod, f, t);
- nod.type = t64? types[TVLONG]: types[TINT];
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
-
- case TFLOAT:
- a = AMOVSS;
- goto fld;
- case TDOUBLE:
- a = AMOVSD;
- fld:
- regalloc(&nod, f, t);
- if(tt != TDOUBLE && tt != TFLOAT){ /* TO DO: why is this here */
- prtree(f, "odd tree");
- nod.type = t64? types[TVLONG]: types[TINT];
- }
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
- }
-
-/*
- * store
- */
- if(t->op == ONAME || t->op == OINDREG ||
- t->op == OIND || t->op == OINDEX)
- switch(tt) {
- case TCHAR:
- case TUCHAR:
- a = AMOVB; goto st;
- case TSHORT:
- case TUSHORT:
- a = AMOVW; goto st;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- a = AMOVL; goto st;
- case TVLONG:
- case TUVLONG:
- case TIND:
- a = AMOVQ; goto st;
-
- st:
- if(f->op == OCONST) {
- gins(a, f, t);
- return;
- }
- fst:
- regalloc(&nod, t, f);
- gmove(f, &nod);
- gins(a, &nod, t);
- regfree(&nod);
- return;
-
- case TFLOAT:
- a = AMOVSS;
- goto fst;
- case TDOUBLE:
- a = AMOVSD;
- goto fst;
- }
-
-/*
- * convert
- */
- switch(CASE(ft,tt)) {
- default:
-/*
- * integer to integer
- ********
- a = AGOK; break;
-
- case CASE( TCHAR, TCHAR):
- case CASE( TUCHAR, TCHAR):
- case CASE( TSHORT, TCHAR):
- case CASE( TUSHORT,TCHAR):
- case CASE( TINT, TCHAR):
- case CASE( TUINT, TCHAR):
- case CASE( TLONG, TCHAR):
- case CASE( TULONG, TCHAR):
-
- case CASE( TCHAR, TUCHAR):
- case CASE( TUCHAR, TUCHAR):
- case CASE( TSHORT, TUCHAR):
- case CASE( TUSHORT,TUCHAR):
- case CASE( TINT, TUCHAR):
- case CASE( TUINT, TUCHAR):
- case CASE( TLONG, TUCHAR):
- case CASE( TULONG, TUCHAR):
-
- case CASE( TSHORT, TSHORT):
- case CASE( TUSHORT,TSHORT):
- case CASE( TINT, TSHORT):
- case CASE( TUINT, TSHORT):
- case CASE( TLONG, TSHORT):
- case CASE( TULONG, TSHORT):
-
- case CASE( TSHORT, TUSHORT):
- case CASE( TUSHORT,TUSHORT):
- case CASE( TINT, TUSHORT):
- case CASE( TUINT, TUSHORT):
- case CASE( TLONG, TUSHORT):
- case CASE( TULONG, TUSHORT):
-
- case CASE( TINT, TINT):
- case CASE( TUINT, TINT):
- case CASE( TLONG, TINT):
- case CASE( TULONG, TINT):
-
- case CASE( TINT, TUINT):
- case CASE( TUINT, TUINT):
- case CASE( TLONG, TUINT):
- case CASE( TULONG, TUINT):
- *****/
- a = AMOVL;
- break;
-
- case CASE( TINT, TIND):
- case CASE( TINT, TVLONG):
- case CASE( TINT, TUVLONG):
- case CASE( TLONG, TIND):
- case CASE( TLONG, TVLONG):
- case CASE( TLONG, TUVLONG):
- a = AMOVLQSX;
- if(f->op == OCONST) {
- f->vconst &= (uvlong)0xffffffffU;
- if(f->vconst & 0x80000000)
- f->vconst |= (vlong)0xffffffff << 32;
- a = AMOVQ;
- }
- break;
-
- case CASE( TUINT, TIND):
- case CASE( TUINT, TVLONG):
- case CASE( TUINT, TUVLONG):
- case CASE( TULONG, TVLONG):
- case CASE( TULONG, TUVLONG):
- case CASE( TULONG, TIND):
- a = AMOVLQZX;
- if(f->op == OCONST) {
- f->vconst &= (uvlong)0xffffffffU;
- a = AMOVQ;
- }
- break;
-
- case CASE( TIND, TCHAR):
- case CASE( TIND, TUCHAR):
- case CASE( TIND, TSHORT):
- case CASE( TIND, TUSHORT):
- case CASE( TIND, TINT):
- case CASE( TIND, TUINT):
- case CASE( TIND, TLONG):
- case CASE( TIND, TULONG):
- case CASE( TVLONG, TCHAR):
- case CASE( TVLONG, TUCHAR):
- case CASE( TVLONG, TSHORT):
- case CASE( TVLONG, TUSHORT):
- case CASE( TVLONG, TINT):
- case CASE( TVLONG, TUINT):
- case CASE( TVLONG, TLONG):
- case CASE( TVLONG, TULONG):
- case CASE( TUVLONG, TCHAR):
- case CASE( TUVLONG, TUCHAR):
- case CASE( TUVLONG, TSHORT):
- case CASE( TUVLONG, TUSHORT):
- case CASE( TUVLONG, TINT):
- case CASE( TUVLONG, TUINT):
- case CASE( TUVLONG, TLONG):
- case CASE( TUVLONG, TULONG):
- a = AMOVQL;
- if(f->op == OCONST) {
- f->vconst &= (int)0xffffffffU;
- a = AMOVL;
- }
- break;
-
- case CASE( TIND, TIND):
- case CASE( TIND, TVLONG):
- case CASE( TIND, TUVLONG):
- case CASE( TVLONG, TIND):
- case CASE( TVLONG, TVLONG):
- case CASE( TVLONG, TUVLONG):
- case CASE( TUVLONG, TIND):
- case CASE( TUVLONG, TVLONG):
- case CASE( TUVLONG, TUVLONG):
- a = AMOVQ;
- break;
-
- case CASE( TSHORT, TINT):
- case CASE( TSHORT, TUINT):
- case CASE( TSHORT, TLONG):
- case CASE( TSHORT, TULONG):
- a = AMOVWLSX;
- if(f->op == OCONST) {
- f->vconst &= 0xffff;
- if(f->vconst & 0x8000)
- f->vconst |= 0xffff0000;
- a = AMOVL;
- }
- break;
-
- case CASE( TSHORT, TVLONG):
- case CASE( TSHORT, TUVLONG):
- case CASE( TSHORT, TIND):
- a = AMOVWQSX;
- if(f->op == OCONST) {
- f->vconst &= 0xffff;
- if(f->vconst & 0x8000){
- f->vconst |= 0xffff0000;
- f->vconst |= (vlong)~0 << 32;
- }
- a = AMOVL;
- }
- break;
-
- case CASE( TUSHORT,TINT):
- case CASE( TUSHORT,TUINT):
- case CASE( TUSHORT,TLONG):
- case CASE( TUSHORT,TULONG):
- a = AMOVWLZX;
- if(f->op == OCONST) {
- f->vconst &= 0xffff;
- a = AMOVL;
- }
- break;
-
- case CASE( TUSHORT,TVLONG):
- case CASE( TUSHORT,TUVLONG):
- case CASE( TUSHORT,TIND):
- a = AMOVWQZX;
- if(f->op == OCONST) {
- f->vconst &= 0xffff;
- a = AMOVL; /* MOVL also zero-extends to 64 bits */
- }
- break;
-
- case CASE( TCHAR, TSHORT):
- case CASE( TCHAR, TUSHORT):
- case CASE( TCHAR, TINT):
- case CASE( TCHAR, TUINT):
- case CASE( TCHAR, TLONG):
- case CASE( TCHAR, TULONG):
- a = AMOVBLSX;
- if(f->op == OCONST) {
- f->vconst &= 0xff;
- if(f->vconst & 0x80)
- f->vconst |= 0xffffff00;
- a = AMOVL;
- }
- break;
-
- case CASE( TCHAR, TVLONG):
- case CASE( TCHAR, TUVLONG):
- case CASE( TCHAR, TIND):
- a = AMOVBQSX;
- if(f->op == OCONST) {
- f->vconst &= 0xff;
- if(f->vconst & 0x80){
- f->vconst |= 0xffffff00;
- f->vconst |= (vlong)~0 << 32;
- }
- a = AMOVQ;
- }
- break;
-
- case CASE( TUCHAR, TSHORT):
- case CASE( TUCHAR, TUSHORT):
- case CASE( TUCHAR, TINT):
- case CASE( TUCHAR, TUINT):
- case CASE( TUCHAR, TLONG):
- case CASE( TUCHAR, TULONG):
- a = AMOVBLZX;
- if(f->op == OCONST) {
- f->vconst &= 0xff;
- a = AMOVL;
- }
- break;
-
- case CASE( TUCHAR, TVLONG):
- case CASE( TUCHAR, TUVLONG):
- case CASE( TUCHAR, TIND):
- a = AMOVBQZX;
- if(f->op == OCONST) {
- f->vconst &= 0xff;
- a = AMOVL; /* zero-extends to 64-bits */
- }
- break;
-
-/*
- * float to fix
- */
- case CASE( TFLOAT, TCHAR):
- case CASE( TFLOAT, TUCHAR):
- case CASE( TFLOAT, TSHORT):
- case CASE( TFLOAT, TUSHORT):
- case CASE( TFLOAT, TINT):
- case CASE( TFLOAT, TUINT):
- case CASE( TFLOAT, TLONG):
- case CASE( TFLOAT, TULONG):
- case CASE( TFLOAT, TVLONG):
- case CASE( TFLOAT, TUVLONG):
- case CASE( TFLOAT, TIND):
-
- case CASE( TDOUBLE,TCHAR):
- case CASE( TDOUBLE,TUCHAR):
- case CASE( TDOUBLE,TSHORT):
- case CASE( TDOUBLE,TUSHORT):
- case CASE( TDOUBLE,TINT):
- case CASE( TDOUBLE,TUINT):
- case CASE( TDOUBLE,TLONG):
- case CASE( TDOUBLE,TULONG):
- case CASE( TDOUBLE,TVLONG):
- case CASE( TDOUBLE,TUVLONG):
- case CASE( TDOUBLE,TIND):
- regalloc(&nod, t, Z);
- if(ewidth[tt] == SZ_VLONG || typeu[tt] && ewidth[tt] == SZ_INT){
- if(ft == TFLOAT)
- a = ACVTTSS2SQ;
- else
- a = ACVTTSD2SQ;
- }else{
- if(ft == TFLOAT)
- a = ACVTTSS2SL;
- else
- a = ACVTTSD2SL;
- }
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
-
-/*
- * uvlong to float
- */
- case CASE( TUVLONG, TDOUBLE):
- case CASE( TUVLONG, TFLOAT):
- a = ACVTSQ2SS;
- if(tt == TDOUBLE)
- a = ACVTSQ2SD;
- regalloc(&nod, f, f);
- gmove(f, &nod);
- regalloc(&nod1, t, t);
- gins(ACMPQ, &nod, nodconst(0));
- gins(AJLT, Z, Z);
- p1 = p;
- gins(a, &nod, &nod1);
- gins(AJMP, Z, Z);
- p2 = p;
- patch(p1, pc);
- regalloc(&nod2, f, Z);
- regalloc(&nod3, f, Z);
- gmove(&nod, &nod2);
- gins(ASHRQ, nodconst(1), &nod2);
- gmove(&nod, &nod3);
- gins(AANDL, nodconst(1), &nod3);
- gins(AORQ, &nod3, &nod2);
- gins(a, &nod2, &nod1);
- gins(tt == TDOUBLE? AADDSD: AADDSS, &nod1, &nod1);
- regfree(&nod2);
- regfree(&nod3);
- patch(p2, pc);
- regfree(&nod);
- regfree(&nod1);
- return;
-
- case CASE( TULONG, TDOUBLE):
- case CASE( TUINT, TDOUBLE):
- case CASE( TULONG, TFLOAT):
- case CASE( TUINT, TFLOAT):
- a = ACVTSQ2SS;
- if(tt == TDOUBLE)
- a = ACVTSQ2SD;
- regalloc(&nod, f, f);
- gins(AMOVLQZX, f, &nod);
- regalloc(&nod1, t, t);
- gins(a, &nod, &nod1);
- gmove(&nod1, t);
- regfree(&nod);
- regfree(&nod1);
- return;
-
-/*
- * fix to float
- */
- case CASE( TCHAR, TFLOAT):
- case CASE( TUCHAR, TFLOAT):
- case CASE( TSHORT, TFLOAT):
- case CASE( TUSHORT,TFLOAT):
- case CASE( TINT, TFLOAT):
- case CASE( TLONG, TFLOAT):
- case CASE( TVLONG, TFLOAT):
- case CASE( TIND, TFLOAT):
-
- case CASE( TCHAR, TDOUBLE):
- case CASE( TUCHAR, TDOUBLE):
- case CASE( TSHORT, TDOUBLE):
- case CASE( TUSHORT,TDOUBLE):
- case CASE( TINT, TDOUBLE):
- case CASE( TLONG, TDOUBLE):
- case CASE( TVLONG, TDOUBLE):
- case CASE( TIND, TDOUBLE):
- regalloc(&nod, t, t);
- if(ewidth[ft] == SZ_VLONG){
- if(tt == TFLOAT)
- a = ACVTSQ2SS;
- else
- a = ACVTSQ2SD;
- }else{
- if(tt == TFLOAT)
- a = ACVTSL2SS;
- else
- a = ACVTSL2SD;
- }
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
-
-/*
- * float to float
- */
- case CASE( TFLOAT, TFLOAT):
- a = AMOVSS;
- break;
- case CASE( TDOUBLE,TFLOAT):
- a = ACVTSD2SS;
- break;
- case CASE( TFLOAT, TDOUBLE):
- a = ACVTSS2SD;
- break;
- case CASE( TDOUBLE,TDOUBLE):
- a = AMOVSD;
- break;
- }
- if(a == AMOVQ || a == AMOVSD || a == AMOVSS || a == AMOVL && ewidth[ft] == ewidth[tt]) /* TO DO: check AMOVL */
- if(samaddr(f, t))
- return;
- gins(a, f, t);
-}
-
-void
-doindex(Node *n)
-{
- Node nod, nod1;
- int32 v;
-
-if(debug['Y'])
-prtree(n, "index");
-
-if(n->left->complex >= FNX)
-print("botch in doindex\n");
-
- regalloc(&nod, &qregnode, Z);
- v = constnode.vconst;
- cgen(n->right, &nod);
- idx.ptr = D_NONE;
- if(n->left->op == OCONST)
- idx.ptr = D_CONST;
- else if(n->left->op == OREGISTER)
- idx.ptr = n->left->reg;
- else if(n->left->op != OADDR) {
- reg[D_BP]++; // can't be used as a base
- regalloc(&nod1, &qregnode, Z);
- cgen(n->left, &nod1);
- idx.ptr = nod1.reg;
- regfree(&nod1);
- reg[D_BP]--;
- }
- idx.reg = nod.reg;
- regfree(&nod);
- constnode.vconst = v;
-}
-
-void
-gins(int a, Node *f, Node *t)
-{
-
- if(f != Z && f->op == OINDEX)
- doindex(f);
- if(t != Z && t->op == OINDEX)
- doindex(t);
- nextpc();
- p->as = a;
- if(f != Z)
- naddr(f, &p->from);
- if(t != Z)
- naddr(t, &p->to);
- if(debug['g'])
- print("%P\n", p);
-}
-
-void
-gopcode(int o, Type *ty, Node *f, Node *t)
-{
- int a, et;
-
- et = TLONG;
- if(ty != T)
- et = ty->etype;
- if(et == TIND && ewidth[TIND] == 4)
- et = TUINT;
- if(debug['M']) {
- if(f != Z && f->type != T)
- print("gop: %O %O[%s],", o, f->op, tnames[et]);
- else
- print("gop: %O Z,", o);
- if(t != Z && t->type != T)
- print("%O[%s]\n", t->op, tnames[t->type->etype]);
- else
- print("Z\n");
- }
- a = AGOK;
- switch(o) {
- case OCOM:
- a = ANOTL;
- if(et == TCHAR || et == TUCHAR)
- a = ANOTB;
- if(et == TSHORT || et == TUSHORT)
- a = ANOTW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ANOTQ;
- break;
-
- case ONEG:
- a = ANEGL;
- if(et == TCHAR || et == TUCHAR)
- a = ANEGB;
- if(et == TSHORT || et == TUSHORT)
- a = ANEGW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ANEGQ;
- break;
-
- case OADDR:
- a = ALEAQ;
- break;
-
- case OASADD:
- case OADD:
- a = AADDL;
- if(et == TCHAR || et == TUCHAR)
- a = AADDB;
- if(et == TSHORT || et == TUSHORT)
- a = AADDW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AADDQ;
- if(et == TFLOAT)
- a = AADDSS;
- if(et == TDOUBLE)
- a = AADDSD;
- break;
-
- case OASSUB:
- case OSUB:
- a = ASUBL;
- if(et == TCHAR || et == TUCHAR)
- a = ASUBB;
- if(et == TSHORT || et == TUSHORT)
- a = ASUBW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ASUBQ;
- if(et == TFLOAT)
- a = ASUBSS;
- if(et == TDOUBLE)
- a = ASUBSD;
- break;
-
- case OASOR:
- case OOR:
- a = AORL;
- if(et == TCHAR || et == TUCHAR)
- a = AORB;
- if(et == TSHORT || et == TUSHORT)
- a = AORW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AORQ;
- break;
-
- case OASAND:
- case OAND:
- a = AANDL;
- if(et == TCHAR || et == TUCHAR)
- a = AANDB;
- if(et == TSHORT || et == TUSHORT)
- a = AANDW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AANDQ;
- break;
-
- case OASXOR:
- case OXOR:
- a = AXORL;
- if(et == TCHAR || et == TUCHAR)
- a = AXORB;
- if(et == TSHORT || et == TUSHORT)
- a = AXORW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AXORQ;
- break;
-
- case OASLSHR:
- case OLSHR:
- a = ASHRL;
- if(et == TCHAR || et == TUCHAR)
- a = ASHRB;
- if(et == TSHORT || et == TUSHORT)
- a = ASHRW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ASHRQ;
- break;
-
- case OASASHR:
- case OASHR:
- a = ASARL;
- if(et == TCHAR || et == TUCHAR)
- a = ASARB;
- if(et == TSHORT || et == TUSHORT)
- a = ASARW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ASARQ;
- break;
-
- case OASASHL:
- case OASHL:
- a = ASALL;
- if(et == TCHAR || et == TUCHAR)
- a = ASALB;
- if(et == TSHORT || et == TUSHORT)
- a = ASALW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ASALQ;
- break;
-
- case OROTL:
- a = AROLL;
- if(et == TCHAR || et == TUCHAR)
- a = AROLB;
- if(et == TSHORT || et == TUSHORT)
- a = AROLW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AROLQ;
- break;
-
- case OFUNC:
- a = ACALL;
- break;
-
- case OASMUL:
- case OMUL:
- if(f->op == OREGISTER && t != Z && isreg(t, D_AX) && reg[D_DX] == 0)
- t = Z;
- a = AIMULL;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AIMULQ;
- if(et == TFLOAT)
- a = AMULSS;
- if(et == TDOUBLE)
- a = AMULSD;
- break;
-
- case OASMOD:
- case OMOD:
- case OASDIV:
- case ODIV:
- a = AIDIVL;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AIDIVQ;
- if(et == TFLOAT)
- a = ADIVSS;
- if(et == TDOUBLE)
- a = ADIVSD;
- break;
-
- case OASLMUL:
- case OLMUL:
- a = AMULL;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = AMULQ;
- break;
-
- case OASLMOD:
- case OLMOD:
- case OASLDIV:
- case OLDIV:
- a = ADIVL;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ADIVQ;
- break;
-
- case OEQ:
- case ONE:
- case OLT:
- case OLE:
- case OGE:
- case OGT:
- case OLO:
- case OLS:
- case OHS:
- case OHI:
- a = ACMPL;
- if(et == TCHAR || et == TUCHAR)
- a = ACMPB;
- if(et == TSHORT || et == TUSHORT)
- a = ACMPW;
- if(et == TVLONG || et == TUVLONG || et == TIND)
- a = ACMPQ;
- if(et == TFLOAT)
- a = AUCOMISS;
- if(et == TDOUBLE)
- a = AUCOMISD;
- gins(a, f, t);
- switch(o) {
- case OEQ: a = AJEQ; break;
- case ONE: a = AJNE; break;
- case OLT: a = AJLT; break;
- case OLE: a = AJLE; break;
- case OGE: a = AJGE; break;
- case OGT: a = AJGT; break;
- case OLO: a = AJCS; break;
- case OLS: a = AJLS; break;
- case OHS: a = AJCC; break;
- case OHI: a = AJHI; break;
- }
- gins(a, Z, Z);
- return;
- }
- if(a == AGOK)
- diag(Z, "bad in gopcode %O", o);
- gins(a, f, t);
-}
-
-int
-samaddr(Node *f, Node *t)
-{
- return f->op == OREGISTER && t->op == OREGISTER && f->reg == t->reg;
-}
-
-void
-gbranch(int o)
-{
- int a;
-
- a = AGOK;
- switch(o) {
- case ORETURN:
- a = ARET;
- break;
- case OGOTO:
- a = AJMP;
- break;
- }
- nextpc();
- if(a == AGOK) {
- diag(Z, "bad in gbranch %O", o);
- nextpc();
- }
- p->as = a;
-}
-
-void
-patch(Prog *op, int32 pc)
-{
- op->to.offset = pc;
- op->to.type = D_BRANCH;
- op->to.u.branch = nil;
- op->pcond = nil;
-}
-
-void
-gpseudo(int a, Sym *s, Node *n)
-{
-
- nextpc();
- p->as = a;
- p->from.type = D_EXTERN;
- p->from.sym = linksym(s);
-
- switch(a) {
- case ATEXT:
- p->from.scale = textflag;
- textflag = 0;
- break;
- case AGLOBL:
- p->from.scale = s->dataflag;
- break;
- }
-
- if(s->class == CSTATIC)
- p->from.type = D_STATIC;
- naddr(n, &p->to);
- if(a == ADATA || a == AGLOBL)
- pc--;
-}
-
-void
-gpcdata(int index, int value)
-{
- Node n1;
-
- n1 = *nodconst(index);
- gins(APCDATA, &n1, nodconst(value));
-}
-
-void
-gprefetch(Node *n)
-{
- Node n1;
-
- regalloc(&n1, n, Z);
- gmove(n, &n1);
- n1.op = OINDREG;
- gins(APREFETCHNTA, &n1, Z);
- regfree(&n1);
-}
-
-int
-sconst(Node *n)
-{
- int32 v;
-
- if(n->op == OCONST && !typefd[n->type->etype]) {
- v = n->vconst;
- if(v >= -32766L && v < 32766L)
- return 1;
- }
- return 0;
-}
-
-int32
-exreg(Type *t)
-{
- int32 o;
-
- if(typechlpv[t->etype]) {
- if(exregoffset >= 64)
- return 0;
- o = exregoffset;
- exregoffset += ewidth[TIND];
- return o+1; // +1 to avoid 0 == failure; naddr's case OEXREG will subtract 1.
- }
- return 0;
-}
-
-schar ewidth[NTYPE] =
-{
- -1, /*[TXXX]*/
- SZ_CHAR, /*[TCHAR]*/
- SZ_CHAR, /*[TUCHAR]*/
- SZ_SHORT, /*[TSHORT]*/
- SZ_SHORT, /*[TUSHORT]*/
- SZ_INT, /*[TINT]*/
- SZ_INT, /*[TUINT]*/
- SZ_LONG, /*[TLONG]*/
- SZ_LONG, /*[TULONG]*/
- SZ_VLONG, /*[TVLONG]*/
- SZ_VLONG, /*[TUVLONG]*/
- SZ_FLOAT, /*[TFLOAT]*/
- SZ_DOUBLE, /*[TDOUBLE]*/
- SZ_IND, /*[TIND]*/
- 0, /*[TFUNC]*/
- -1, /*[TARRAY]*/
- 0, /*[TVOID]*/
- -1, /*[TSTRUCT]*/
- -1, /*[TUNION]*/
- SZ_INT, /*[TENUM]*/
-};
-int32 ncast[NTYPE] =
-{
- 0, /*[TXXX]*/
- BCHAR|BUCHAR, /*[TCHAR]*/
- BCHAR|BUCHAR, /*[TUCHAR]*/
- BSHORT|BUSHORT, /*[TSHORT]*/
- BSHORT|BUSHORT, /*[TUSHORT]*/
- BINT|BUINT|BLONG|BULONG, /*[TINT]*/
- BINT|BUINT|BLONG|BULONG, /*[TUINT]*/
- BINT|BUINT|BLONG|BULONG, /*[TLONG]*/
- BINT|BUINT|BLONG|BULONG, /*[TULONG]*/
- BVLONG|BUVLONG|BIND, /*[TVLONG]*/
- BVLONG|BUVLONG|BIND, /*[TUVLONG]*/
- BFLOAT, /*[TFLOAT]*/
- BDOUBLE, /*[TDOUBLE]*/
- BVLONG|BUVLONG|BIND, /*[TIND]*/
- 0, /*[TFUNC]*/
- 0, /*[TARRAY]*/
- 0, /*[TVOID]*/
- BSTRUCT, /*[TSTRUCT]*/
- BUNION, /*[TUNION]*/
- 0, /*[TENUM]*/
-};
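The TUVLONG-to-float case in gmove above open-codes a conversion trick: a value whose top bit is clear converts directly, while a value with the top bit set is halved with its low bit folded back in (so rounding is preserved), converted as a signed quantity, and then added to itself. A minimal C sketch of the same idea, written for this note rather than taken from the compiler (u64todouble is an invented name):

#include <stdio.h>

/* Sketch only: mirrors the uvlong-to-float sequence gmove emits above. */
double
u64todouble(unsigned long long v)
{
	if((long long)v >= 0)
		return (double)(long long)v;	/* fits in 63 bits: convert directly */
	/* halve, keep the low bit as a sticky bit, convert, then double */
	return 2.0 * (double)(long long)((v >> 1) | (v & 1));
}

int
main(void)
{
	printf("%.1f\n", u64todouble(18446744073709551615ULL));
	return 0;
}
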
diff --git a/src/cmd/8c/Makefile b/src/cmd/8c/Makefile
deleted file mode 100644
index 3f528d751..000000000
--- a/src/cmd/8c/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright 2012 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.dist
diff --git a/src/cmd/8c/cgen.c b/src/cmd/8c/cgen.c
deleted file mode 100644
index 87e8fdad8..000000000
--- a/src/cmd/8c/cgen.c
+++ /dev/null
@@ -1,1939 +0,0 @@
-// Inferno utils/8c/cgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/cgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-#include "../../runtime/funcdata.h"
-
-/* ,x/^(print|prtree)\(/i/\/\/ */
-
-void
-cgen(Node *n, Node *nn)
-{
- Node *l, *r, *t;
- Prog *p1;
- Node nod, nod1, nod2, nod3, nod4;
- int o, hardleft;
- int32 v, curs;
- vlong c;
-
- if(debug['g']) {
- prtree(nn, "cgen lhs");
- prtree(n, "cgen");
- }
- if(n == Z || n->type == T)
- return;
- if(typesuv[n->type->etype] && (n->op != OFUNC || nn != Z)) {
- sugen(n, nn, n->type->width);
- return;
- }
- l = n->left;
- r = n->right;
- o = n->op;
-
- if(n->op == OEXREG || (nn != Z && nn->op == OEXREG)) {
- gmove(n, nn);
- return;
- }
-
- if(n->addable >= INDEXED) {
- if(nn == Z) {
- switch(o) {
- default:
- nullwarn(Z, Z);
- break;
- case OINDEX:
- nullwarn(l, r);
- break;
- }
- return;
- }
- gmove(n, nn);
- return;
- }
- curs = cursafe;
-
- if(l->complex >= FNX)
- if(r != Z && r->complex >= FNX)
- switch(o) {
- default:
- if(cond(o) && typesuv[l->type->etype])
- break;
-
- regret(&nod, r, 0, 0);
- cgen(r, &nod);
-
- regsalloc(&nod1, r);
- gmove(&nod, &nod1);
-
- regfree(&nod);
- nod = *n;
- nod.right = &nod1;
-
- cgen(&nod, nn);
- return;
-
- case OFUNC:
- case OCOMMA:
- case OANDAND:
- case OOROR:
- case OCOND:
- case ODOT:
- break;
- }
-
- hardleft = l->addable < INDEXED || l->complex >= FNX;
- switch(o) {
- default:
- diag(n, "unknown op in cgen: %O", o);
- break;
-
- case ONEG:
- case OCOM:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- gopcode(o, n->type, Z, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- case OAS:
- if(typefd[n->type->etype]) {
- cgen(r, &fregnode0);
- if(nn != Z)
- gins(AFMOVD, &fregnode0, &fregnode0);
- if(l->addable < INDEXED) {
- reglcgen(&nod, l, Z);
- gmove(&fregnode0, &nod);
- regfree(&nod);
- } else
- gmove(&fregnode0, l);
- if(nn != Z)
- gmove(&fregnode0, nn);
- return;
- }
- if(l->op == OBIT)
- goto bitas;
- if(!hardleft) {
- if(nn != Z || r->addable < INDEXED) {
- if(r->complex >= FNX && nn == Z)
- regret(&nod, r, 0, 0);
- else
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- gmove(&nod, l);
- if(nn != Z)
- gmove(&nod, nn);
- regfree(&nod);
- } else
- gmove(r, l);
- break;
- }
- if(l->complex >= r->complex) {
- if(l->op == OINDEX && r->op == OCONST) {
- gmove(r, l);
- break;
- }
- reglcgen(&nod1, l, Z);
- if(r->addable >= INDEXED) {
- gmove(r, &nod1);
- if(nn != Z)
- gmove(r, nn);
- regfree(&nod1);
- break;
- }
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- } else {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- reglcgen(&nod1, l, Z);
- }
- gmove(&nod, &nod1);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- bitas:
- n = l->left;
- regalloc(&nod, r, nn);
- if(l->complex >= r->complex) {
- reglcgen(&nod1, n, Z);
- cgen(r, &nod);
- } else {
- cgen(r, &nod);
- reglcgen(&nod1, n, Z);
- }
- regalloc(&nod2, n, Z);
- gmove(&nod1, &nod2);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
-
- case OBIT:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- bitload(n, &nod, Z, Z, nn);
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- case OLSHR:
- case OASHL:
- case OASHR:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(r->op == OCONST) {
- if(r->vconst == 0) {
- cgen(l, nn);
- break;
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(o == OASHL && r->vconst == 1)
- gopcode(OADD, n->type, &nod, &nod);
- else
- gopcode(o, n->type, r, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
-
- /*
- * get nod to be D_CX
- */
- if(nodreg(&nod, nn, D_CX)) {
- regsalloc(&nod1, n);
- gmove(&nod, &nod1);
- cgen(n, &nod); /* probably a bug */
- gmove(&nod, nn);
- gmove(&nod1, &nod);
- break;
- }
- reg[D_CX]++;
- if(nn->op == OREGISTER && nn->reg == D_CX)
- regalloc(&nod1, l, Z);
- else
- regalloc(&nod1, l, nn);
- if(r->complex >= l->complex) {
- cgen(r, &nod);
- cgen(l, &nod1);
- } else {
- cgen(l, &nod1);
- cgen(r, &nod);
- }
- gopcode(o, n->type, &nod, &nod1);
- gmove(&nod1, nn);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OADD:
- case OSUB:
- case OOR:
- case OXOR:
- case OAND:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(typefd[n->type->etype])
- goto fop;
- if(r->op == OCONST) {
- if(r->vconst == 0 && o != OAND) {
- cgen(l, nn);
- break;
- }
- }
- if(n->op == OOR && l->op == OASHL && r->op == OLSHR
- && l->right->op == OCONST && r->right->op == OCONST
- && l->left->op == ONAME && r->left->op == ONAME
- && l->left->sym == r->left->sym
- && l->right->vconst + r->right->vconst == 8 * l->left->type->width) {
- regalloc(&nod, l->left, nn);
- cgen(l->left, &nod);
- gopcode(OROTL, n->type, l->right, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
- if(n->op == OADD && l->op == OASHL && l->right->op == OCONST
- && (r->op != OCONST || r->vconst < -128 || r->vconst > 127)) {
- c = l->right->vconst;
- if(c > 0 && c <= 3) {
- if(l->left->complex >= r->complex) {
- regalloc(&nod, l->left, nn);
- cgen(l->left, &nod);
- if(r->addable < INDEXED) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- genmuladd(&nod, &nod, 1 << c, &nod1);
- regfree(&nod1);
- }
- else
- genmuladd(&nod, &nod, 1 << c, r);
- }
- else {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- regalloc(&nod1, l->left, Z);
- cgen(l->left, &nod1);
- genmuladd(&nod, &nod1, 1 << c, &nod);
- regfree(&nod1);
- }
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
- }
- if(r->addable >= INDEXED) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- gopcode(o, n->type, r, &nod);
- gmove(&nod, nn);
- regfree(&nod);
- break;
- }
- if(l->complex >= r->complex) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(o, n->type, &nod1, &nod);
- } else {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- regalloc(&nod, l, Z);
- cgen(l, &nod);
- gopcode(o, n->type, &nod1, &nod);
- }
- gmove(&nod, nn);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OLMOD:
- case OMOD:
- case OLMUL:
- case OLDIV:
- case OMUL:
- case ODIV:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- if(typefd[n->type->etype])
- goto fop;
- if(r->op == OCONST) {
- SET(v);
- switch(o) {
- case ODIV:
- case OMOD:
- c = r->vconst;
- if(c < 0)
- c = -c;
- v = xlog2(c);
- if(v < 0)
- break;
- /* fall thru */
- case OMUL:
- case OLMUL:
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- switch(o) {
- case OMUL:
- case OLMUL:
- mulgen(n->type, r, &nod);
- break;
- case ODIV:
- sdiv2(r->vconst, v, l, &nod);
- break;
- case OMOD:
- smod2(r->vconst, v, l, &nod);
- break;
- }
- gmove(&nod, nn);
- regfree(&nod);
- goto done;
- case OLDIV:
- c = r->vconst;
- if((c & 0x80000000) == 0)
- break;
- regalloc(&nod1, l, Z);
- cgen(l, &nod1);
- regalloc(&nod, l, nn);
- zeroregm(&nod);
- gins(ACMPL, &nod1, nodconst(c));
- gins(ASBBL, nodconst(-1), &nod);
- regfree(&nod1);
- gmove(&nod, nn);
- regfree(&nod);
- goto done;
- }
- }
-
- if(o == OMUL || o == OLMUL) {
- if(l->addable >= INDEXED) {
- t = l;
- l = r;
- r = t;
- }
- reg[D_DX]++; // for gopcode case OMUL
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(r->addable < INDEXED) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(OMUL, n->type, &nod1, &nod);
- regfree(&nod1);
- }else
- gopcode(OMUL, n->type, r, &nod); /* addressable */
- gmove(&nod, nn);
- regfree(&nod);
- reg[D_DX]--;
- break;
- }
-
- /*
- * get nod to be D_AX
- * get nod1 to be D_DX
- */
- if(nodreg(&nod, nn, D_AX)) {
- regsalloc(&nod2, n);
- gmove(&nod, &nod2);
- v = reg[D_AX];
- reg[D_AX] = 0;
-
- if(isreg(l, D_AX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_AX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod);
- reg[D_AX] = v;
- break;
- }
- if(nodreg(&nod1, nn, D_DX)) {
- regsalloc(&nod2, n);
- gmove(&nod1, &nod2);
- v = reg[D_DX];
- reg[D_DX] = 0;
-
- if(isreg(l, D_DX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_DX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod1);
- reg[D_DX] = v;
- break;
- }
- reg[D_AX]++;
-
- if(r->op == OCONST && (o == ODIV || o == OLDIV)) {
- reg[D_DX]++;
- if(l->addable < INDEXED) {
- regalloc(&nod2, l, Z);
- cgen(l, &nod2);
- l = &nod2;
- }
- if(o == ODIV)
- sdivgen(l, r, &nod, &nod1);
- else
- udivgen(l, r, &nod, &nod1);
- gmove(&nod1, nn);
- if(l == &nod2)
- regfree(l);
- goto freeaxdx;
- }
-
- if(l->complex >= r->complex) {
- cgen(l, &nod);
- reg[D_DX]++;
- if(o == ODIV || o == OMOD)
- gins(ACDQ, Z, Z);
- if(o == OLDIV || o == OLMOD)
- zeroregm(&nod1);
- if(r->addable < INDEXED || r->op == OCONST) {
- regsalloc(&nod3, r);
- cgen(r, &nod3);
- gopcode(o, n->type, &nod3, Z);
- } else
- gopcode(o, n->type, r, Z);
- } else {
- regsalloc(&nod3, r);
- cgen(r, &nod3);
- cgen(l, &nod);
- reg[D_DX]++;
- if(o == ODIV || o == OMOD)
- gins(ACDQ, Z, Z);
- if(o == OLDIV || o == OLMOD)
- zeroregm(&nod1);
- gopcode(o, n->type, &nod3, Z);
- }
- if(o == OMOD || o == OLMOD)
- gmove(&nod1, nn);
- else
- gmove(&nod, nn);
- freeaxdx:
- regfree(&nod);
- regfree(&nod1);
- break;
-
- case OASLSHR:
- case OASASHL:
- case OASASHR:
- if(r->op == OCONST)
- goto asand;
- if(l->op == OBIT)
- goto asbitop;
- if(typefd[n->type->etype])
- goto asfop;
-
- /*
- * get nod to be D_CX
- */
- if(nodreg(&nod, nn, D_CX)) {
- regsalloc(&nod1, n);
- gmove(&nod, &nod1);
- cgen(n, &nod);
- if(nn != Z)
- gmove(&nod, nn);
- gmove(&nod1, &nod);
- break;
- }
- reg[D_CX]++;
-
- if(r->complex >= l->complex) {
- cgen(r, &nod);
- if(hardleft)
- reglcgen(&nod1, l, Z);
- else
- nod1 = *l;
- } else {
- if(hardleft)
- reglcgen(&nod1, l, Z);
- else
- nod1 = *l;
- cgen(r, &nod);
- }
-
- gopcode(o, l->type, &nod, &nod1);
- regfree(&nod);
- if(nn != Z)
- gmove(&nod1, nn);
- if(hardleft)
- regfree(&nod1);
- break;
-
- case OASAND:
- case OASADD:
- case OASSUB:
- case OASXOR:
- case OASOR:
- asand:
- if(l->op == OBIT)
- goto asbitop;
- if(typefd[n->type->etype]||typefd[r->type->etype])
- goto asfop;
- if(l->complex >= r->complex) {
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- if(r->op != OCONST) {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- gopcode(o, l->type, &nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(o, l->type, r, &nod);
- } else {
- regalloc(&nod1, r, nn);
- cgen(r, &nod1);
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- gopcode(o, l->type, &nod1, &nod);
- regfree(&nod1);
- }
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod);
- break;
-
- case OASLMUL:
- case OASLDIV:
- case OASLMOD:
- case OASMUL:
- case OASDIV:
- case OASMOD:
- if(l->op == OBIT)
- goto asbitop;
- if(typefd[n->type->etype]||typefd[r->type->etype])
- goto asfop;
- if(r->op == OCONST) {
- SET(v);
- switch(o) {
- case OASDIV:
- case OASMOD:
- c = r->vconst;
- if(c < 0)
- c = -c;
- v = xlog2(c);
- if(v < 0)
- break;
- /* fall thru */
- case OASMUL:
- case OASLMUL:
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- regalloc(&nod, l, nn);
- cgen(&nod2, &nod);
- switch(o) {
- case OASMUL:
- case OASLMUL:
- mulgen(n->type, r, &nod);
- break;
- case OASDIV:
- sdiv2(r->vconst, v, l, &nod);
- break;
- case OASMOD:
- smod2(r->vconst, v, l, &nod);
- break;
- }
- havev:
- gmove(&nod, &nod2);
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod2);
- regfree(&nod);
- goto done;
- case OASLDIV:
- c = r->vconst;
- if((c & 0x80000000) == 0)
- break;
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- regalloc(&nod1, l, nn);
- cgen(&nod2, &nod1);
- regalloc(&nod, l, nn);
- zeroregm(&nod);
- gins(ACMPL, &nod1, nodconst(c));
- gins(ASBBL, nodconst(-1), &nod);
- regfree(&nod1);
- goto havev;
- }
- }
-
- if(o == OASMUL) {
- /* should favour AX */
- regalloc(&nod, l, nn);
- if(r->complex >= FNX) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- r = &nod1;
- }
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- cgen(&nod2, &nod);
- if(r->addable < INDEXED) {
- if(r->complex < FNX) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- }
- gopcode(OASMUL, n->type, &nod1, &nod);
- regfree(&nod1);
- }
- else
- gopcode(OASMUL, n->type, r, &nod);
- if(r == &nod1)
- regfree(r);
- gmove(&nod, &nod2);
- if(nn != Z)
- gmove(&nod, nn);
- regfree(&nod);
- if(hardleft)
- regfree(&nod2);
- break;
- }
-
- /*
- * get nod to be D_AX
- * get nod1 to be D_DX
- */
- if(nodreg(&nod, nn, D_AX)) {
- regsalloc(&nod2, n);
- gmove(&nod, &nod2);
- v = reg[D_AX];
- reg[D_AX] = 0;
-
- if(isreg(l, D_AX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_AX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod);
- reg[D_AX] = v;
- break;
- }
- if(nodreg(&nod1, nn, D_DX)) {
- regsalloc(&nod2, n);
- gmove(&nod1, &nod2);
- v = reg[D_DX];
- reg[D_DX] = 0;
-
- if(isreg(l, D_DX)) {
- nod3 = *n;
- nod3.left = &nod2;
- cgen(&nod3, nn);
- } else
- if(isreg(r, D_DX)) {
- nod3 = *n;
- nod3.right = &nod2;
- cgen(&nod3, nn);
- } else
- cgen(n, nn);
-
- gmove(&nod2, &nod1);
- reg[D_DX] = v;
- break;
- }
- reg[D_AX]++;
- reg[D_DX]++;
-
- if(l->complex >= r->complex) {
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- cgen(&nod2, &nod);
- if(r->op == OCONST) {
- switch(o) {
- case OASDIV:
- sdivgen(&nod2, r, &nod, &nod1);
- goto divdone;
- case OASLDIV:
- udivgen(&nod2, r, &nod, &nod1);
- divdone:
- gmove(&nod1, &nod2);
- if(nn != Z)
- gmove(&nod1, nn);
- goto freelxaxdx;
- }
- }
- if(o == OASDIV || o == OASMOD)
- gins(ACDQ, Z, Z);
- if(o == OASLDIV || o == OASLMOD)
- zeroregm(&nod1);
- if(r->addable < INDEXED || r->op == OCONST ||
- !typeil[r->type->etype]) {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- gopcode(o, l->type, &nod3, Z);
- regfree(&nod3);
- } else
- gopcode(o, n->type, r, Z);
- } else {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- if(hardleft)
- reglcgen(&nod2, l, Z);
- else
- nod2 = *l;
- cgen(&nod2, &nod);
- if(o == OASDIV || o == OASMOD)
- gins(ACDQ, Z, Z);
- if(o == OASLDIV || o == OASLMOD)
- zeroregm(&nod1);
- gopcode(o, l->type, &nod3, Z);
- regfree(&nod3);
- }
- if(o == OASMOD || o == OASLMOD) {
- gmove(&nod1, &nod2);
- if(nn != Z)
- gmove(&nod1, nn);
- } else {
- gmove(&nod, &nod2);
- if(nn != Z)
- gmove(&nod, nn);
- }
- freelxaxdx:
- if(hardleft)
- regfree(&nod2);
- regfree(&nod);
- regfree(&nod1);
- break;
-
- fop:
- if(l->complex >= r->complex) {
- cgen(l, &fregnode0);
- if(r->addable < INDEXED) {
- cgen(r, &fregnode0);
- fgopcode(o, &fregnode0, &fregnode1, 1, 0);
- } else
- fgopcode(o, r, &fregnode0, 0, 0);
- } else {
- cgen(r, &fregnode0);
- if(l->addable < INDEXED) {
- cgen(l, &fregnode0);
- fgopcode(o, &fregnode0, &fregnode1, 1, 1);
- } else
- fgopcode(o, l, &fregnode0, 0, 1);
- }
- gmove(&fregnode0, nn);
- break;
-
- asfop:
- if(l->complex >= r->complex) {
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- cgen(r, &fregnode0);
- } else {
- cgen(r, &fregnode0);
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- }
- if(!typefd[l->type->etype]) {
- gmove(&nod, &fregnode0);
- fgopcode(o, &fregnode0, &fregnode1, 1, 1);
- } else
- fgopcode(o, &nod, &fregnode0, 0, 1);
- if(nn != Z)
- gins(AFMOVD, &fregnode0, &fregnode0);
- gmove(&fregnode0, &nod);
- if(nn != Z)
- gmove(&fregnode0, nn);
- if(hardleft)
- regfree(&nod);
- break;
-
- asbitop:
- regalloc(&nod4, n, nn);
- if(l->complex >= r->complex) {
- bitload(l, &nod, &nod1, &nod2, &nod4);
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- } else {
- regalloc(&nod3, r, Z);
- cgen(r, &nod3);
- bitload(l, &nod, &nod1, &nod2, &nod4);
- }
- gmove(&nod, &nod4);
-
- if(typefd[nod3.type->etype])
- fgopcode(o, &fregnode0, &fregnode1, 1, 1);
- else {
- Node onod;
-
- /* incredible grot ... */
- onod = nod3;
- onod.op = o;
- onod.complex = 2;
- onod.addable = 0;
- onod.type = tfield;
- onod.left = &nod4;
- onod.right = &nod3;
- cgen(&onod, Z);
- }
- regfree(&nod3);
- gmove(&nod4, &nod);
- regfree(&nod4);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
-
- case OADDR:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- lcgen(l, nn);
- break;
-
- case OFUNC:
- if(l->complex >= FNX) {
- if(l->op != OIND)
- diag(n, "bad function call");
-
- regret(&nod, l->left, 0, 0);
- cgen(l->left, &nod);
- regsalloc(&nod1, l->left);
- gmove(&nod, &nod1);
- regfree(&nod);
-
- nod = *n;
- nod.left = &nod2;
- nod2 = *l;
- nod2.left = &nod1;
- nod2.complex = 1;
- cgen(&nod, nn);
-
- return;
- }
- gargs(r, &nod, &nod1);
- if(l->addable < INDEXED) {
- reglcgen(&nod, l, nn);
- nod.op = OREGISTER;
- gopcode(OFUNC, n->type, Z, &nod);
- regfree(&nod);
- } else
- gopcode(OFUNC, n->type, Z, l);
- if(REGARG >= 0 && reg[REGARG])
- reg[REGARG]--;
- regret(&nod, n, l->type, 1); // update maxarg if nothing else
- if(nn != Z)
- gmove(&nod, nn);
- if(nod.op == OREGISTER)
- regfree(&nod);
- if(nn == Z && hasdotdotdot(l->type) && typefd[n->type->etype])
- gins(AFMOVDP, &fregnode0, &fregnode0);
- break;
-
- case OIND:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- regialloc(&nod, n, nn);
- r = l;
- while(r->op == OADD)
- r = r->right;
- if(sconst(r)) {
- v = r->vconst;
- r->vconst = 0;
- cgen(l, &nod);
- nod.xoffset += v;
- r->vconst = v;
- } else
- cgen(l, &nod);
- regind(&nod, n);
- gmove(&nod, nn);
- regfree(&nod);
- break;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OLO:
- case OLS:
- case OHI:
- case OHS:
- if(nn == Z) {
- nullwarn(l, r);
- break;
- }
- boolgen(n, 1, nn);
- break;
-
- case OANDAND:
- case OOROR:
- boolgen(n, 1, nn);
- if(nn == Z)
- patch(p, pc);
- break;
-
- case ONOT:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- boolgen(n, 1, nn);
- break;
-
- case OCOMMA:
- cgen(l, Z);
- cgen(r, nn);
- break;
-
- case OCAST:
- if(nn == Z) {
- nullwarn(l, Z);
- break;
- }
- /*
- * convert from types l->n->nn
- */
- if(nocast(l->type, n->type) && nocast(n->type, nn->type)) {
- /* both null, gen l->nn */
- cgen(l, nn);
- break;
- }
- if(typev[l->type->etype]) {
- cgen64(n, nn);
- break;
- }
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- regalloc(&nod1, n, &nod);
- gmove(&nod, &nod1);
- gmove(&nod1, nn);
- regfree(&nod1);
- regfree(&nod);
- break;
-
- case ODOT:
- sugen(l, nodrat, l->type->width);
- if(nn == Z)
- break;
- warn(n, "non-interruptable temporary");
- nod = *nodrat;
- if(!r || r->op != OCONST) {
- diag(n, "DOT and no offset");
- break;
- }
- nod.xoffset += (int32)r->vconst;
- nod.type = n->type;
- cgen(&nod, nn);
- break;
-
- case OCOND:
- bcgen(l, 1);
- p1 = p;
- cgen(r->left, nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- cgen(r->right, nn);
- patch(p1, pc);
- break;
-
- case OPOSTINC:
- case OPOSTDEC:
- v = 1;
- if(l->type->etype == TIND)
- v = l->type->link->width;
- if(o == OPOSTDEC)
- v = -v;
- if(l->op == OBIT)
- goto bitinc;
- if(nn == Z)
- goto pre;
-
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
-
- if(typefd[n->type->etype])
- goto fltinc;
- gmove(&nod, nn);
- gopcode(OADD, n->type, nodconst(v), &nod);
- if(hardleft)
- regfree(&nod);
- break;
-
- case OPREINC:
- case OPREDEC:
- v = 1;
- if(l->type->etype == TIND)
- v = l->type->link->width;
- if(o == OPREDEC)
- v = -v;
- if(l->op == OBIT)
- goto bitinc;
-
- pre:
- if(hardleft)
- reglcgen(&nod, l, Z);
- else
- nod = *l;
- if(typefd[n->type->etype])
- goto fltinc;
- gopcode(OADD, n->type, nodconst(v), &nod);
- if(nn != Z)
- gmove(&nod, nn);
- if(hardleft)
- regfree(&nod);
- break;
-
- fltinc:
- gmove(&nod, &fregnode0);
- if(nn != Z && (o == OPOSTINC || o == OPOSTDEC))
- gins(AFMOVD, &fregnode0, &fregnode0);
- gins(AFLD1, Z, Z);
- if(v < 0)
- fgopcode(OSUB, &fregnode0, &fregnode1, 1, 0);
- else
- fgopcode(OADD, &fregnode0, &fregnode1, 1, 0);
- if(nn != Z && (o == OPREINC || o == OPREDEC))
- gins(AFMOVD, &fregnode0, &fregnode0);
- gmove(&fregnode0, &nod);
- if(hardleft)
- regfree(&nod);
- break;
-
- bitinc:
- if(nn != Z && (o == OPOSTINC || o == OPOSTDEC)) {
- bitload(l, &nod, &nod1, &nod2, Z);
- gmove(&nod, nn);
- gopcode(OADD, tfield, nodconst(v), &nod);
- bitstore(l, &nod, &nod1, &nod2, Z);
- break;
- }
- bitload(l, &nod, &nod1, &nod2, nn);
- gopcode(OADD, tfield, nodconst(v), &nod);
- bitstore(l, &nod, &nod1, &nod2, nn);
- break;
- }
-done:
- cursafe = curs;
-}
-
-void
-reglcgen(Node *t, Node *n, Node *nn)
-{
- Node *r;
- int32 v;
-
- regialloc(t, n, nn);
- if(n->op == OIND) {
- r = n->left;
- while(r->op == OADD)
- r = r->right;
- if(sconst(r)) {
- v = r->vconst;
- r->vconst = 0;
- lcgen(n, t);
- t->xoffset += v;
- r->vconst = v;
- regind(t, n);
- return;
- }
- }
- lcgen(n, t);
- regind(t, n);
-}
-
-void
-lcgen(Node *n, Node *nn)
-{
- Prog *p1;
- Node nod;
-
- if(debug['g']) {
- prtree(nn, "lcgen lhs");
- prtree(n, "lcgen");
- }
- if(n == Z || n->type == T)
- return;
- if(nn == Z) {
- nn = &nod;
- regalloc(&nod, n, Z);
- }
- switch(n->op) {
- default:
- if(n->addable < INDEXED) {
- diag(n, "unknown op in lcgen: %O", n->op);
- break;
- }
- gopcode(OADDR, n->type, n, nn);
- break;
-
- case OCOMMA:
- cgen(n->left, n->left);
- lcgen(n->right, nn);
- break;
-
- case OIND:
- cgen(n->left, nn);
- break;
-
- case OCOND:
- bcgen(n->left, 1);
- p1 = p;
- lcgen(n->right->left, nn);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- lcgen(n->right->right, nn);
- patch(p1, pc);
- break;
- }
-}
-
-void
-bcgen(Node *n, int true)
-{
-
- if(n->type == T)
- gbranch(OGOTO);
- else
- boolgen(n, true, Z);
-}
-
-void
-boolgen(Node *n, int true, Node *nn)
-{
- int o;
- Prog *p1, *p2, *p3;
- Node *l, *r, nod, nod1;
- int32 curs;
-
- if(debug['g']) {
- prtree(nn, "boolgen lhs");
- prtree(n, "boolgen");
- }
- curs = cursafe;
- l = n->left;
- r = n->right;
- switch(n->op) {
-
- default:
- if(typev[n->type->etype]) {
- testv(n, true);
- goto com;
- }
- o = ONE;
- if(true)
- o = OEQ;
- if(typefd[n->type->etype]) {
- if(n->addable < INDEXED) {
- cgen(n, &fregnode0);
- gins(AFLDZ, Z, Z);
- fgopcode(o, &fregnode0, &fregnode1, 1, 1);
- } else {
- gins(AFLDZ, Z, Z);
- fgopcode(o, n, &fregnode0, 0, 1);
- }
- goto com;
- }
- /* bad, 13 is address of external that becomes constant */
- if(n->addable >= INDEXED && n->addable != 13) {
- gopcode(o, n->type, n, nodconst(0));
- goto com;
- }
- regalloc(&nod, n, nn);
- cgen(n, &nod);
- gopcode(o, n->type, &nod, nodconst(0));
- regfree(&nod);
- goto com;
-
- case OCONST:
- o = vconst(n);
- if(!true)
- o = !o;
- gbranch(OGOTO);
- if(o) {
- p1 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- }
- goto com;
-
- case OCOMMA:
- cgen(l, Z);
- boolgen(r, true, nn);
- break;
-
- case ONOT:
- boolgen(l, !true, nn);
- break;
-
- case OCOND:
- bcgen(l, 1);
- p1 = p;
- bcgen(r->left, true);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- bcgen(r->right, !true);
- patch(p2, pc);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- patch(p2, pc);
- goto com;
-
- case OANDAND:
- if(!true)
- goto caseor;
-
- caseand:
- bcgen(l, true);
- p1 = p;
- bcgen(r, !true);
- p2 = p;
- patch(p1, pc);
- gbranch(OGOTO);
- patch(p2, pc);
- goto com;
-
- case OOROR:
- if(!true)
- goto caseand;
-
- caseor:
- bcgen(l, !true);
- p1 = p;
- bcgen(r, !true);
- p2 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- patch(p2, pc);
- goto com;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- o = n->op;
- if(typev[l->type->etype]) {
- if(!true)
- n->op = comrel[relindex(o)];
- cgen64(n, Z);
- goto com;
- }
- if(true && typefd[l->type->etype] && (o == OEQ || o == ONE)) {
- // Cannot rewrite !(l == r) into l != r with float64; it breaks NaNs.
- // Jump around instead.
- boolgen(n, 0, Z);
- p1 = p;
- gbranch(OGOTO);
- patch(p1, pc);
- goto com;
- }
- if(true)
- o = comrel[relindex(o)];
- if(l->complex >= FNX && r->complex >= FNX) {
- regret(&nod, r, 0, 0);
- cgen(r, &nod);
- regsalloc(&nod1, r);
- gmove(&nod, &nod1);
- regfree(&nod);
- nod = *n;
- nod.right = &nod1;
- boolgen(&nod, true, nn);
- break;
- }
- if(typefd[l->type->etype]) {
- if(l->complex >= r->complex) {
- cgen(l, &fregnode0);
- if(r->addable < INDEXED) {
- cgen(r, &fregnode0);
- o = invrel[relindex(o)];
- fgopcode(o, &fregnode0, &fregnode1, 1, 1);
- } else
- fgopcode(o, r, &fregnode0, 0, 1);
- } else {
- o = invrel[relindex(o)];
- cgen(r, &fregnode0);
- if(l->addable < INDEXED) {
- cgen(l, &fregnode0);
- o = invrel[relindex(o)];
- fgopcode(o, &fregnode0, &fregnode1, 1, 1);
- } else
- fgopcode(o, l, &fregnode0, 0, 1);
- }
- switch(o) {
- case OEQ:
- // Already emitted AJEQ; want AJEQ and AJPC.
- p1 = p;
- gbranch(OGOTO);
- p2 = p;
- patch(p1, pc);
- gins(AJPC, Z, Z);
- patch(p2, pc);
- break;
-
- case ONE:
- // Already emitted AJNE; want AJNE or AJPS.
- p1 = p;
- gins(AJPS, Z, Z);
- p2 = p;
- gbranch(OGOTO);
- p3 = p;
- patch(p1, pc);
- patch(p2, pc);
- gbranch(OGOTO);
- patch(p3, pc);
- break;
- }
- goto com;
- }
- if(l->op == OCONST) {
- o = invrel[relindex(o)];
- /* bad, 13 is address of external that becomes constant */
- if(r->addable < INDEXED || r->addable == 13) {
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- gopcode(o, l->type, &nod, l);
- regfree(&nod);
- } else
- gopcode(o, l->type, r, l);
- goto com;
- }
- if(l->complex >= r->complex) {
- regalloc(&nod, l, nn);
- cgen(l, &nod);
- if(r->addable < INDEXED) {
- regalloc(&nod1, r, Z);
- cgen(r, &nod1);
- gopcode(o, l->type, &nod, &nod1);
- regfree(&nod1);
- } else
- gopcode(o, l->type, &nod, r);
- regfree(&nod);
- goto com;
- }
- regalloc(&nod, r, nn);
- cgen(r, &nod);
- if(l->addable < INDEXED || l->addable == 13) {
- regalloc(&nod1, l, Z);
- cgen(l, &nod1);
- if(typechlp[l->type->etype])
- gopcode(o, types[TINT], &nod1, &nod);
- else
- gopcode(o, l->type, &nod1, &nod);
- regfree(&nod1);
- } else
- gopcode(o, l->type, l, &nod);
- regfree(&nod);
-
- com:
- if(nn != Z) {
- p1 = p;
- gmove(nodconst(1L), nn);
- gbranch(OGOTO);
- p2 = p;
- patch(p1, pc);
- gmove(nodconst(0L), nn);
- patch(p2, pc);
- }
- break;
- }
- cursafe = curs;
-}
-
-void
-sugen(Node *n, Node *nn, int32 w)
-{
- Prog *p1;
- Node nod0, nod1, nod2, nod3, nod4, *h, *l, *r;
- Type *t;
- int c, v, x;
-
- if(n == Z || n->type == T)
- return;
- if(debug['g']) {
- prtree(nn, "sugen lhs");
- prtree(n, "sugen");
- }
- if(nn == nodrat)
- if(w > nrathole)
- nrathole = w;
- switch(n->op) {
- case OIND:
- if(nn == Z) {
- nullwarn(n->left, Z);
- break;
- }
-
- default:
- goto copy;
-
- case OCONST:
- if(n->type && typev[n->type->etype]) {
- if(nn == Z) {
- nullwarn(n->left, Z);
- break;
- }
-
- if(nn->op == OREGPAIR) {
- loadpair(n, nn);
- break;
- }
- else if(!vaddr(nn, 0)) {
- t = nn->type;
- nn->type = types[TLONG];
- reglcgen(&nod1, nn, Z);
- nn->type = t;
-
- gmove(lo64(n), &nod1);
- nod1.xoffset += SZ_LONG;
- gmove(hi64(n), &nod1);
- regfree(&nod1);
- }
- else {
- gins(AMOVL, lo64(n), nn);
- nn->xoffset += SZ_LONG;
- gins(AMOVL, hi64(n), nn);
- nn->xoffset -= SZ_LONG;
- break;
- }
- break;
- }
- goto copy;
-
- case ODOT:
- l = n->left;
- sugen(l, nodrat, l->type->width);
- if(nn == Z)
- break;
- warn(n, "non-interruptable temporary");
- nod1 = *nodrat;
- r = n->right;
- if(!r || r->op != OCONST) {
- diag(n, "DOT and no offset");
- break;
- }
- nod1.xoffset += (int32)r->vconst;
- nod1.type = n->type;
- sugen(&nod1, nn, w);
- break;
-
- case OSTRUCT:
- /*
- * rewrite so lhs has no fn call
- */
- if(nn != Z && side(nn)) {
- nod1 = *n;
- nod1.type = typ(TIND, n->type);
- regret(&nod2, &nod1, 0, 0);
- lcgen(nn, &nod2);
- regsalloc(&nod0, &nod1);
- cgen(&nod2, &nod0);
- regfree(&nod2);
-
- nod1 = *n;
- nod1.op = OIND;
- nod1.left = &nod0;
- nod1.right = Z;
- nod1.complex = 1;
-
- sugen(n, &nod1, w);
- return;
- }
-
- r = n->left;
- for(t = n->type->link; t != T; t = t->down) {
- l = r;
- if(r->op == OLIST) {
- l = r->left;
- r = r->right;
- }
- if(nn == Z) {
- cgen(l, nn);
- continue;
- }
- /*
- * hand craft *(&nn + o) = l
- */
- nod0 = znode;
- nod0.op = OAS;
- nod0.type = t;
- nod0.left = &nod1;
- nod0.right = nil;
-
- nod1 = znode;
- nod1.op = OIND;
- nod1.type = t;
- nod1.left = &nod2;
-
- nod2 = znode;
- nod2.op = OADD;
- nod2.type = typ(TIND, t);
- nod2.left = &nod3;
- nod2.right = &nod4;
-
- nod3 = znode;
- nod3.op = OADDR;
- nod3.type = nod2.type;
- nod3.left = nn;
-
- nod4 = znode;
- nod4.op = OCONST;
- nod4.type = nod2.type;
- nod4.vconst = t->offset;
-
- ccom(&nod0);
- acom(&nod0);
- xcom(&nod0);
- nod0.addable = 0;
- nod0.right = l;
-
- // prtree(&nod0, "hand craft");
- cgen(&nod0, Z);
- }
- break;
-
- case OAS:
- if(nn == Z) {
- if(n->addable < INDEXED)
- sugen(n->right, n->left, w);
- break;
- }
-
- sugen(n->right, nodrat, w);
- warn(n, "non-interruptable temporary");
- sugen(nodrat, n->left, w);
- sugen(nodrat, nn, w);
- break;
-
- case OFUNC:
- if(!hasdotdotdot(n->left->type)) {
- cgen(n, Z);
- if(nn != Z) {
- curarg -= n->type->width;
- regret(&nod1, n, n->left->type, 1);
- if(nn->complex >= FNX) {
- regsalloc(&nod2, n);
- cgen(&nod1, &nod2);
- nod1 = nod2;
- }
- cgen(&nod1, nn);
- }
- break;
- }
- if(nn == Z) {
- sugen(n, nodrat, w);
- break;
- }
- h = nn;
- if(nn->op == OREGPAIR) {
- regsalloc(&nod1, nn);
- nn = &nod1;
- }
- if(nn->op != OIND) {
- nn = new1(OADDR, nn, Z);
- nn->type = types[TIND];
- nn->addable = 0;
- } else
- nn = nn->left;
- n = new(OFUNC, n->left, new(OLIST, nn, n->right));
- n->type = types[TVOID];
- n->left->type = types[TVOID];
- cgen(n, Z);
- if(h->op == OREGPAIR)
- loadpair(nn->left, h);
- break;
-
- case OCOND:
- bcgen(n->left, 1);
- p1 = p;
- sugen(n->right->left, nn, w);
- gbranch(OGOTO);
- patch(p1, pc);
- p1 = p;
- sugen(n->right->right, nn, w);
- patch(p1, pc);
- break;
-
- case OCOMMA:
- cgen(n->left, Z);
- sugen(n->right, nn, w);
- break;
- }
- return;
-
-copy:
- if(nn == Z) {
- switch(n->op) {
- case OASADD:
- case OASSUB:
- case OASAND:
- case OASOR:
- case OASXOR:
-
- case OASMUL:
- case OASLMUL:
-
-
- case OASASHL:
- case OASASHR:
- case OASLSHR:
- break;
-
- case OPOSTINC:
- case OPOSTDEC:
- case OPREINC:
- case OPREDEC:
- break;
-
- default:
- return;
- }
- }
-
- v = w == 8;
- if(n->complex >= FNX && nn != nil && nn->complex >= FNX) {
- t = nn->type;
- nn->type = types[TLONG];
- regialloc(&nod1, nn, Z);
- lcgen(nn, &nod1);
- regsalloc(&nod2, nn);
- nn->type = t;
-
- gins(AMOVL, &nod1, &nod2);
- regfree(&nod1);
-
- nod2.type = typ(TIND, t);
-
- nod1 = nod2;
- nod1.op = OIND;
- nod1.left = &nod2;
- nod1.right = Z;
- nod1.complex = 1;
- nod1.type = t;
-
- sugen(n, &nod1, w);
- return;
- }
-
- x = 0;
- if(v) {
- if(nn != nil && nn->complex >= FNX) {
- t = nn->type;
- nn->type = types[TLONG];
- regialloc(&nod2, nn, Z);
- lcgen(nn, &nod2);
- nn->type = t;
-
- nod2.type = typ(TIND, t);
-
- nod1 = nod2;
- nod1.op = OIND;
- nod1.left = &nod2;
- nod1.right = Z;
- nod1.complex = 1;
- nod1.type = t;
-
- sugen(n, &nod1, w);
- regfree(&nod2);
- return;
- }
-
- c = cursafe;
- if(n->left != Z && n->left->complex >= FNX
- && n->right != Z && n->right->complex >= FNX) {
-// warn(n, "toughie");
- regsalloc(&nod1, n->right);
- cgen(n->right, &nod1);
- nod2 = *n;
- nod2.right = &nod1;
- cgen(&nod2, nn);
- cursafe = c;
- return;
- }
- if(cgen64(n, nn)) {
- cursafe = c;
- return;
- }
- if(n->op == OCOM) {
- n = n->left;
- x = 1;
- }
- }
-
- /* botch, need to save in .safe */
- c = 0;
- if(n->complex > nn->complex) {
- t = n->type;
- n->type = types[TLONG];
- if(v) {
- regalloc(&nod0, n, Z);
- if(!vaddr(n, 0)) {
- reglcgen(&nod1, n, Z);
- n->type = t;
- n = &nod1;
- }
- else
- n->type = t;
- }
- else {
- nodreg(&nod1, n, D_SI);
- if(reg[D_SI]) {
- gins(APUSHL, &nod1, Z);
- c |= 1;
- reg[D_SI]++;
- }
- lcgen(n, &nod1);
- n->type = t;
- }
-
- t = nn->type;
- nn->type = types[TLONG];
- if(v) {
- if(!vaddr(nn, 0)) {
- reglcgen(&nod2, nn, Z);
- nn->type = t;
- nn = &nod2;
- }
- else
- nn->type = t;
- }
- else {
- nodreg(&nod2, nn, D_DI);
- if(reg[D_DI]) {
- gins(APUSHL, &nod2, Z);
- c |= 2;
- reg[D_DI]++;
- }
- lcgen(nn, &nod2);
- nn->type = t;
- }
- } else {
- t = nn->type;
- nn->type = types[TLONG];
- if(v) {
- regalloc(&nod0, nn, Z);
- if(!vaddr(nn, 0)) {
- reglcgen(&nod2, nn, Z);
- nn->type = t;
- nn = &nod2;
- }
- else
- nn->type = t;
- }
- else {
- nodreg(&nod2, nn, D_DI);
- if(reg[D_DI]) {
- gins(APUSHL, &nod2, Z);
- c |= 2;
- reg[D_DI]++;
- }
- lcgen(nn, &nod2);
- nn->type = t;
- }
-
- t = n->type;
- n->type = types[TLONG];
- if(v) {
- if(!vaddr(n, 0)) {
- reglcgen(&nod1, n, Z);
- n->type = t;
- n = &nod1;
- }
- else
- n->type = t;
- }
- else {
- nodreg(&nod1, n, D_SI);
- if(reg[D_SI]) {
- gins(APUSHL, &nod1, Z);
- c |= 1;
- reg[D_SI]++;
- }
- lcgen(n, &nod1);
- n->type = t;
- }
- }
- if(v) {
- gins(AMOVL, n, &nod0);
- if(x)
- gins(ANOTL, Z, &nod0);
- gins(AMOVL, &nod0, nn);
- n->xoffset += SZ_LONG;
- nn->xoffset += SZ_LONG;
- gins(AMOVL, n, &nod0);
- if(x)
- gins(ANOTL, Z, &nod0);
- gins(AMOVL, &nod0, nn);
- n->xoffset -= SZ_LONG;
- nn->xoffset -= SZ_LONG;
- if(nn == &nod2)
- regfree(&nod2);
- if(n == &nod1)
- regfree(&nod1);
- regfree(&nod0);
- return;
- }
- nodreg(&nod3, n, D_CX);
- if(reg[D_CX]) {
- gins(APUSHL, &nod3, Z);
- c |= 4;
- reg[D_CX]++;
- }
- gins(AMOVL, nodconst(w/SZ_LONG), &nod3);
- gins(ACLD, Z, Z);
- gins(AREP, Z, Z);
- gins(AMOVSL, Z, Z);
- if(c & 4) {
- gins(APOPL, Z, &nod3);
- reg[D_CX]--;
- }
- if(c & 2) {
- gins(APOPL, Z, &nod2);
- reg[nod2.reg]--;
- }
- if(c & 1) {
- gins(APOPL, Z, &nod1);
- reg[nod1.reg]--;
- }
-}
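The OADD/OOR case in cgen above recognizes a shift-or idiom: when the same variable is shifted left and logically right by constants whose counts sum to the operand's width in bits, the OR of the two shifts is rewritten into a single ROLL. A minimal sketch of the source-level pattern being matched, written for this note (rotl1 is an invented name, not part of the compiler):

#include <stdio.h>

/* Sketch only: the pattern cgen lowers to one rotate instruction. */
unsigned int
rotl1(unsigned int x)
{
	return (x << 1) | (x >> 31);	/* 1 + 31 == 32 == 8 * width of x */
}

int
main(void)
{
	printf("%#x\n", rotl1(0x80000001u));	/* prints 0x3 */
	return 0;
}
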
diff --git a/src/cmd/8c/cgen64.c b/src/cmd/8c/cgen64.c
deleted file mode 100644
index 3424f762c..000000000
--- a/src/cmd/8c/cgen64.c
+++ /dev/null
@@ -1,2657 +0,0 @@
-// Inferno utils/8c/cgen64.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/cgen64.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-void
-zeroregm(Node *n)
-{
- gins(AMOVL, nodconst(0), n);
-}
-
-/* do we need to load the address of a vlong? */
-int
-vaddr(Node *n, int a)
-{
- switch(n->op) {
- case ONAME:
- if(a)
- return 1;
- return !(n->class == CEXTERN || n->class == CGLOBL || n->class == CSTATIC);
-
- case OCONST:
- case OREGISTER:
- case OINDREG:
- return 1;
- }
- return 0;
-}
-
-int32
-hi64v(Node *n)
-{
- if(align(0, types[TCHAR], Aarg1, nil)) /* isbigendian */
- return (int32)(n->vconst) & ~0L;
- else
- return (int32)((uvlong)n->vconst>>32) & ~0L;
-}
-
-int32
-lo64v(Node *n)
-{
- if(align(0, types[TCHAR], Aarg1, nil)) /* isbigendian */
- return (int32)((uvlong)n->vconst>>32) & ~0L;
- else
- return (int32)(n->vconst) & ~0L;
-}
-
-Node *
-hi64(Node *n)
-{
- return nodconst(hi64v(n));
-}
-
-Node *
-lo64(Node *n)
-{
- return nodconst(lo64v(n));
-}
-
-static Node *
-anonreg(void)
-{
- Node *n;
-
- n = new(OREGISTER, Z, Z);
- n->reg = D_NONE;
- n->type = types[TLONG];
- return n;
-}
-
-static Node *
-regpair(Node *n, Node *t)
-{
- Node *r;
-
- if(n != Z && n->op == OREGPAIR)
- return n;
- r = new(OREGPAIR, anonreg(), anonreg());
- if(n != Z)
- r->type = n->type;
- else
- r->type = t->type;
- return r;
-}
-
-static void
-evacaxdx(Node *r)
-{
- Node nod1, nod2;
-
- if(r->reg == D_AX || r->reg == D_DX) {
- reg[D_AX]++;
- reg[D_DX]++;
- /*
- * this is just an optim that should
- * check for spill
- */
- r->type = types[TULONG];
- regalloc(&nod1, r, Z);
- nodreg(&nod2, Z, r->reg);
- gins(AMOVL, &nod2, &nod1);
- regfree(r);
- r->reg = nod1.reg;
- reg[D_AX]--;
- reg[D_DX]--;
- }
-}
-
-/* lazy instantiation of register pair */
-static int
-instpair(Node *n, Node *l)
-{
- int r;
-
- r = 0;
- if(n->left->reg == D_NONE) {
- if(l != Z) {
- n->left->reg = l->reg;
- r = 1;
- }
- else
- regalloc(n->left, n->left, Z);
- }
- if(n->right->reg == D_NONE)
- regalloc(n->right, n->right, Z);
- return r;
-}
-
-static void
-zapreg(Node *n)
-{
- if(n->reg != D_NONE) {
- regfree(n);
- n->reg = D_NONE;
- }
-}
-
-static void
-freepair(Node *n)
-{
- regfree(n->left);
- regfree(n->right);
-}
-
-/* n is not OREGPAIR, nn is */
-void
-loadpair(Node *n, Node *nn)
-{
- Node nod;
-
- instpair(nn, Z);
- if(n->op == OCONST) {
- gins(AMOVL, lo64(n), nn->left);
- n->xoffset += SZ_LONG;
- gins(AMOVL, hi64(n), nn->right);
- n->xoffset -= SZ_LONG;
- return;
- }
- if(!vaddr(n, 0)) {
- /* steal the right register for the laddr */
- nod = regnode;
- nod.reg = nn->right->reg;
- lcgen(n, &nod);
- n = &nod;
- regind(n, n);
- n->xoffset = 0;
- }
- gins(AMOVL, n, nn->left);
- n->xoffset += SZ_LONG;
- gins(AMOVL, n, nn->right);
- n->xoffset -= SZ_LONG;
-}
-
-/* n is OREGPAIR, nn is not */
-static void
-storepair(Node *n, Node *nn, int f)
-{
- Node nod;
-
- if(!vaddr(nn, 0)) {
- reglcgen(&nod, nn, Z);
- nn = &nod;
- }
- gins(AMOVL, n->left, nn);
- nn->xoffset += SZ_LONG;
- gins(AMOVL, n->right, nn);
- nn->xoffset -= SZ_LONG;
- if(nn == &nod)
- regfree(&nod);
- if(f)
- freepair(n);
-}
-
-enum
-{
-/* 4 only, see WW */
- WNONE = 0,
- WCONST,
- WADDR,
- WHARD,
-};
-
-static int
-whatof(Node *n, int a)
-{
- if(n->op == OCONST)
- return WCONST;
- return !vaddr(n, a) ? WHARD : WADDR;
-}
-
-/* can upgrade an extern to addr for AND */
-static int
-reduxv(Node *n)
-{
- return lo64v(n) == 0 || hi64v(n) == 0;
-}
-
-int
-cond(int op)
-{
- switch(op) {
- case OANDAND:
- case OOROR:
- case ONOT:
- return 1;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- return 1;
- }
- return 0;
-}
-
-/*
- * for a func operand call it and then return
- * the safe node
- */
-static Node *
-vfunc(Node *n, Node *nn)
-{
- Node *t;
-
- if(n->op != OFUNC)
- return n;
- t = new(0, Z, Z);
- if(nn == Z || nn == nodret)
- nn = n;
- regsalloc(t, nn);
- sugen(n, t, 8);
- return t;
-}
-
-/* try to steal a reg */
-static int
-getreg(Node **np, Node *t, int r)
-{
- Node *n, *p;
-
- n = *np;
- if(n->reg == r) {
- p = new(0, Z, Z);
- regalloc(p, n, Z);
- gins(AMOVL, n, p);
- *t = *n;
- *np = p;
- return 1;
- }
- return 0;
-}
-
-static Node *
-snarfreg(Node *n, Node *t, int r, Node *d, Node *c)
-{
- if(n == Z || n->op != OREGPAIR || (!getreg(&n->left, t, r) && !getreg(&n->right, t, r))) {
- if(nodreg(t, Z, r)) {
- regalloc(c, d, Z);
- gins(AMOVL, t, c);
- reg[r]++;
- return c;
- }
- reg[r]++;
- }
- return Z;
-}
-
-enum
-{
- Vstart = OEND,
-
- Vgo,
- Vamv,
- Vmv,
- Vzero,
- Vop,
- Vopx,
- Vins,
- Vins0,
- Vinsl,
- Vinsr,
- Vinsla,
- Vinsra,
- Vinsx,
- Vmul,
- Vshll,
- VT,
- VF,
- V_l_lo_f,
- V_l_hi_f,
- V_l_lo_t,
- V_l_hi_t,
- V_l_lo_u,
- V_l_hi_u,
- V_r_lo_f,
- V_r_hi_f,
- V_r_lo_t,
- V_r_hi_t,
- V_r_lo_u,
- V_r_hi_u,
- Vspazz,
- Vend,
-
- V_T0,
- V_T1,
- V_F0,
- V_F1,
-
- V_a0,
- V_a1,
- V_f0,
- V_f1,
-
- V_p0,
- V_p1,
- V_p2,
- V_p3,
- V_p4,
-
- V_s0,
- V_s1,
- V_s2,
- V_s3,
- V_s4,
-
- C00,
- C01,
- C31,
- C32,
-
- O_l_lo,
- O_l_hi,
- O_r_lo,
- O_r_hi,
- O_t_lo,
- O_t_hi,
- O_l,
- O_r,
- O_l_rp,
- O_r_rp,
- O_t_rp,
- O_r0,
- O_r1,
- O_Zop,
-
- O_a0,
- O_a1,
-
- V_C0,
- V_C1,
-
- V_S0,
- V_S1,
-
- VOPS = 5,
- VLEN = 5,
- VARGS = 2,
-
- S00 = 0,
- Sc0,
- Sc1,
- Sc2,
- Sac3,
- Sac4,
- S10,
-
- SAgen = 0,
- SAclo,
- SAc32,
- SAchi,
- SAdgen,
- SAdclo,
- SAdc32,
- SAdchi,
-
- B0c = 0,
- Bca,
- Bac,
-
- T0i = 0,
- Tii,
-
- Bop0 = 0,
- Bop1,
-};
-
-/*
- * _testv:
- * CMPL lo,$0
- * JNE true
- * CMPL hi,$0
- * JNE true
- * GOTO false
- * false:
- * GOTO code
- * true:
- * GOTO patchme
- * code:
- */
-
-static uchar testi[][VLEN] =
-{
- {Vop, ONE, O_l_lo, C00},
- {V_s0, Vop, ONE, O_l_hi, C00},
- {V_s1, Vgo, V_s2, Vgo, V_s3},
- {VF, V_p0, V_p1, VT, V_p2},
- {Vgo, V_p3},
- {VT, V_p0, V_p1, VF, V_p2},
- {Vend},
-};
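
The _testv comment and the testi table above encode a short-circuit test of the two 32-bit halves of a vlong. A minimal C sketch of the control flow they expand to (stand-in typedef, hypothetical helper name):

	typedef unsigned int uint32;

	static int
	testv_sketch(uint32 lo, uint32 hi)
	{
		if(lo != 0)		/* CMPL lo,$0; JNE true */
			return 1;
		if(hi != 0)		/* CMPL hi,$0; JNE true */
			return 1;
		return 0;		/* both halves zero: false */
	}
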
-
-/* shift left general case */
-static uchar shll00[][VLEN] =
-{
- {Vop, OGE, O_r, C32},
- {V_s0, Vinsl, ASHLL, O_r, O_l_rp},
- {Vins, ASHLL, O_r, O_l_lo, Vgo},
- {V_p0, V_s0},
- {Vins, ASHLL, O_r, O_l_lo},
- {Vins, AMOVL, O_l_lo, O_l_hi},
- {Vzero, O_l_lo, V_p0, Vend},
-};
-
-/* shift left rp, const < 32 */
-static uchar shllc0[][VLEN] =
-{
- {Vinsl, ASHLL, O_r, O_l_rp},
- {Vshll, O_r, O_l_lo, Vend},
-};
-
-/* shift left rp, const == 32 */
-static uchar shllc1[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_l_hi},
- {Vzero, O_l_lo, Vend},
-};
-
-/* shift left rp, const > 32 */
-static uchar shllc2[][VLEN] =
-{
- {Vshll, O_r, O_l_lo},
- {Vins, AMOVL, O_l_lo, O_l_hi},
- {Vzero, O_l_lo, Vend},
-};
-
-/* shift left addr, const == 32 */
-static uchar shllac3[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_hi},
- {Vzero, O_t_lo, Vend},
-};
-
-/* shift left addr, const > 32 */
-static uchar shllac4[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_hi},
- {Vshll, O_r, O_t_hi},
- {Vzero, O_t_lo, Vend},
-};
-
-/* shift left of constant */
-static uchar shll10[][VLEN] =
-{
- {Vop, OGE, O_r, C32},
- {V_s0, Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsl, ASHLL, O_r, O_t_rp},
- {Vins, ASHLL, O_r, O_t_lo, Vgo},
- {V_p0, V_s0},
- {Vins, AMOVL, O_l_lo, O_t_hi},
- {V_l_lo_t, Vins, ASHLL, O_r, O_t_hi},
- {Vzero, O_t_lo, V_p0, Vend},
-};
-
-static uchar (*shlltab[])[VLEN] =
-{
- shll00,
- shllc0,
- shllc1,
- shllc2,
- shllac3,
- shllac4,
- shll10,
-};
-
-/* shift right general case */
-static uchar shrl00[][VLEN] =
-{
- {Vop, OGE, O_r, C32},
- {V_s0, Vinsr, ASHRL, O_r, O_l_rp},
- {Vins, O_a0, O_r, O_l_hi, Vgo},
- {V_p0, V_s0},
- {Vins, O_a0, O_r, O_l_hi},
- {Vins, AMOVL, O_l_hi, O_l_lo},
- {V_T1, Vzero, O_l_hi},
- {V_F1, Vins, ASARL, C31, O_l_hi},
- {V_p0, Vend},
-};
-
-/* shift right rp, const < 32 */
-static uchar shrlc0[][VLEN] =
-{
- {Vinsr, ASHRL, O_r, O_l_rp},
- {Vins, O_a0, O_r, O_l_hi, Vend},
-};
-
-/* shift right rp, const == 32 */
-static uchar shrlc1[][VLEN] =
-{
- {Vins, AMOVL, O_l_hi, O_l_lo},
- {V_T1, Vzero, O_l_hi},
- {V_F1, Vins, ASARL, C31, O_l_hi},
- {Vend},
-};
-
-/* shift right rp, const > 32 */
-static uchar shrlc2[][VLEN] =
-{
- {Vins, O_a0, O_r, O_l_hi},
- {Vins, AMOVL, O_l_hi, O_l_lo},
- {V_T1, Vzero, O_l_hi},
- {V_F1, Vins, ASARL, C31, O_l_hi},
- {Vend},
-};
-
-/* shift right addr, const == 32 */
-static uchar shrlac3[][VLEN] =
-{
- {Vins, AMOVL, O_l_hi, O_t_lo},
- {V_T1, Vzero, O_t_hi},
- {V_F1, Vins, AMOVL, O_t_lo, O_t_hi},
- {V_F1, Vins, ASARL, C31, O_t_hi},
- {Vend},
-};
-
-/* shift right addr, const > 32 */
-static uchar shrlac4[][VLEN] =
-{
- {Vins, AMOVL, O_l_hi, O_t_lo},
- {Vins, O_a0, O_r, O_t_lo},
- {V_T1, Vzero, O_t_hi},
- {V_F1, Vins, AMOVL, O_t_lo, O_t_hi},
- {V_F1, Vins, ASARL, C31, O_t_hi},
- {Vend},
-};
-
-/* shift right of constant */
-static uchar shrl10[][VLEN] =
-{
- {Vop, OGE, O_r, C32},
- {V_s0, Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsr, ASHRL, O_r, O_t_rp},
- {Vins, O_a0, O_r, O_t_hi, Vgo},
- {V_p0, V_s0},
- {Vins, AMOVL, O_l_hi, O_t_lo},
- {V_l_hi_t, Vins, O_a0, O_r, O_t_lo},
- {V_l_hi_u, V_S1},
- {V_T1, Vzero, O_t_hi, V_p0},
- {V_F1, Vins, AMOVL, O_t_lo, O_t_hi},
- {V_F1, Vins, ASARL, C31, O_t_hi},
- {Vend},
-};
-
-static uchar (*shrltab[])[VLEN] =
-{
- shrl00,
- shrlc0,
- shrlc1,
- shrlc2,
- shrlac3,
- shrlac4,
- shrl10,
-};
-
-/* shift asop left general case */
-static uchar asshllgen[][VLEN] =
-{
- {V_a0, V_a1},
- {Vop, OGE, O_r, C32},
- {V_s0, Vins, AMOVL, O_l_lo, O_r0},
- {Vins, AMOVL, O_l_hi, O_r1},
- {Vinsla, ASHLL, O_r, O_r0},
- {Vins, ASHLL, O_r, O_r0},
- {Vins, AMOVL, O_r1, O_l_hi},
- {Vins, AMOVL, O_r0, O_l_lo, Vgo},
- {V_p0, V_s0},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vzero, O_l_lo},
- {Vins, ASHLL, O_r, O_r0},
- {Vins, AMOVL, O_r0, O_l_hi, V_p0},
- {V_f0, V_f1, Vend},
-};
-
-/* shift asop left, const < 32 */
-static uchar asshllclo[][VLEN] =
-{
- {V_a0, V_a1},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vins, AMOVL, O_l_hi, O_r1},
- {Vinsla, ASHLL, O_r, O_r0},
- {Vshll, O_r, O_r0},
- {Vins, AMOVL, O_r1, O_l_hi},
- {Vins, AMOVL, O_r0, O_l_lo},
- {V_f0, V_f1, Vend},
-};
-
-/* shift asop left, const == 32 */
-static uchar asshllc32[][VLEN] =
-{
- {V_a0},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vzero, O_l_lo},
- {Vins, AMOVL, O_r0, O_l_hi},
- {V_f0, Vend},
-};
-
-/* shift asop left, const > 32 */
-static uchar asshllchi[][VLEN] =
-{
- {V_a0},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vzero, O_l_lo},
- {Vshll, O_r, O_r0},
- {Vins, AMOVL, O_r0, O_l_hi},
- {V_f0, Vend},
-};
-
-/* shift asop dest left general case */
-static uchar asdshllgen[][VLEN] =
-{
- {Vop, OGE, O_r, C32},
- {V_s0, Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsl, ASHLL, O_r, O_t_rp},
- {Vins, ASHLL, O_r, O_t_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo, Vgo},
- {V_p0, V_s0},
- {Vins, AMOVL, O_l_lo, O_t_hi},
- {Vzero, O_l_lo},
- {Vins, ASHLL, O_r, O_t_hi},
- {Vzero, O_t_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi, V_p0},
- {Vend},
-};
-
-/* shift asop dest left, const < 32 */
-static uchar asdshllclo[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsl, ASHLL, O_r, O_t_rp},
- {Vshll, O_r, O_t_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vend},
-};
-
-/* shift asop dest left, const == 32 */
-static uchar asdshllc32[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_hi},
- {Vzero, O_t_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vend},
-};
-
-/* shift asop dest, const > 32 */
-static uchar asdshllchi[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_hi},
- {Vzero, O_t_lo},
- {Vshll, O_r, O_t_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi},
- {Vend},
-};
-
-static uchar (*asshlltab[])[VLEN] =
-{
- asshllgen,
- asshllclo,
- asshllc32,
- asshllchi,
- asdshllgen,
- asdshllclo,
- asdshllc32,
- asdshllchi,
-};
-
-/* shift asop right general case */
-static uchar asshrlgen[][VLEN] =
-{
- {V_a0, V_a1},
- {Vop, OGE, O_r, C32},
- {V_s0, Vins, AMOVL, O_l_lo, O_r0},
- {Vins, AMOVL, O_l_hi, O_r1},
- {Vinsra, ASHRL, O_r, O_r0},
- {Vinsx, Bop0, O_r, O_r1},
- {Vins, AMOVL, O_r0, O_l_lo},
- {Vins, AMOVL, O_r1, O_l_hi, Vgo},
- {V_p0, V_s0},
- {Vins, AMOVL, O_l_hi, O_r0},
- {Vinsx, Bop0, O_r, O_r0},
- {V_T1, Vzero, O_l_hi},
- {Vins, AMOVL, O_r0, O_l_lo},
- {V_F1, Vins, ASARL, C31, O_r0},
- {V_F1, Vins, AMOVL, O_r0, O_l_hi},
- {V_p0, V_f0, V_f1, Vend},
-};
-
-/* shift asop right, const < 32 */
-static uchar asshrlclo[][VLEN] =
-{
- {V_a0, V_a1},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vins, AMOVL, O_l_hi, O_r1},
- {Vinsra, ASHRL, O_r, O_r0},
- {Vinsx, Bop0, O_r, O_r1},
- {Vins, AMOVL, O_r0, O_l_lo},
- {Vins, AMOVL, O_r1, O_l_hi},
- {V_f0, V_f1, Vend},
-};
-
-/* shift asop right, const == 32 */
-static uchar asshrlc32[][VLEN] =
-{
- {V_a0},
- {Vins, AMOVL, O_l_hi, O_r0},
- {V_T1, Vzero, O_l_hi},
- {Vins, AMOVL, O_r0, O_l_lo},
- {V_F1, Vins, ASARL, C31, O_r0},
- {V_F1, Vins, AMOVL, O_r0, O_l_hi},
- {V_f0, Vend},
-};
-
-/* shift asop right, const > 32 */
-static uchar asshrlchi[][VLEN] =
-{
- {V_a0},
- {Vins, AMOVL, O_l_hi, O_r0},
- {V_T1, Vzero, O_l_hi},
- {Vinsx, Bop0, O_r, O_r0},
- {Vins, AMOVL, O_r0, O_l_lo},
- {V_F1, Vins, ASARL, C31, O_r0},
- {V_F1, Vins, AMOVL, O_r0, O_l_hi},
- {V_f0, Vend},
-};
-
-/* shift asop dest right general case */
-static uchar asdshrlgen[][VLEN] =
-{
- {Vop, OGE, O_r, C32},
- {V_s0, Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsr, ASHRL, O_r, O_t_rp},
- {Vinsx, Bop0, O_r, O_t_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi, Vgo},
- {V_p0, V_s0},
- {Vins, AMOVL, O_l_hi, O_t_lo},
- {V_T1, Vzero, O_t_hi},
- {Vinsx, Bop0, O_r, O_t_lo},
- {V_F1, Vins, AMOVL, O_t_lo, O_t_hi},
- {V_F1, Vins, ASARL, C31, O_t_hi},
- {Vins, AMOVL, O_t_hi, O_l_hi, V_p0},
- {Vend},
-};
-
-/* shift asop dest right, const < 32 */
-static uchar asdshrlclo[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsr, ASHRL, O_r, O_t_rp},
- {Vinsx, Bop0, O_r, O_t_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi},
- {Vend},
-};
-
-/* shift asop dest right, const == 32 */
-static uchar asdshrlc32[][VLEN] =
-{
- {Vins, AMOVL, O_l_hi, O_t_lo},
- {V_T1, Vzero, O_t_hi},
- {V_F1, Vins, AMOVL, O_t_lo, O_t_hi},
- {V_F1, Vins, ASARL, C31, O_t_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi},
- {Vend},
-};
-
-/* shift asop dest, const > 32 */
-static uchar asdshrlchi[][VLEN] =
-{
- {Vins, AMOVL, O_l_hi, O_t_lo},
- {V_T1, Vzero, O_t_hi},
- {Vinsx, Bop0, O_r, O_t_lo},
- {V_T1, Vins, AMOVL, O_t_hi, O_l_hi},
- {V_T1, Vins, AMOVL, O_t_lo, O_l_lo},
- {V_F1, Vins, AMOVL, O_t_lo, O_t_hi},
- {V_F1, Vins, ASARL, C31, O_t_hi},
- {V_F1, Vins, AMOVL, O_t_lo, O_l_lo},
- {V_F1, Vins, AMOVL, O_t_hi, O_l_hi},
- {Vend},
-};
-
-static uchar (*asshrltab[])[VLEN] =
-{
- asshrlgen,
- asshrlclo,
- asshrlc32,
- asshrlchi,
- asdshrlgen,
- asdshrlclo,
- asdshrlc32,
- asdshrlchi,
-};
-
-static uchar shrlargs[] = { ASHRL, 1 };
-static uchar sarlargs[] = { ASARL, 0 };
-
-/* ++ -- */
-static uchar incdec[][VLEN] =
-{
- {Vinsx, Bop0, C01, O_l_lo},
- {Vinsx, Bop1, C00, O_l_hi, Vend},
-};
-
-/* ++ -- *p */
-static uchar incdecpre[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsx, Bop0, C01, O_t_lo},
- {Vinsx, Bop1, C00, O_t_hi},
- {Vins, AMOVL, O_t_lo, O_l_lo},
- {Vins, AMOVL, O_t_hi, O_l_hi, Vend},
-};
-
-/* *p ++ -- */
-static uchar incdecpost[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsx, Bop0, C01, O_l_lo},
- {Vinsx, Bop1, C00, O_l_hi, Vend},
-};
-
-/* binop rp, rp */
-static uchar binop00[][VLEN] =
-{
- {Vinsx, Bop0, O_r_lo, O_l_lo},
- {Vinsx, Bop1, O_r_hi, O_l_hi, Vend},
- {Vend},
-};
-
-/* binop rp, addr */
-static uchar binoptmp[][VLEN] =
-{
- {V_a0, Vins, AMOVL, O_r_lo, O_r0},
- {Vinsx, Bop0, O_r0, O_l_lo},
- {Vins, AMOVL, O_r_hi, O_r0},
- {Vinsx, Bop1, O_r0, O_l_hi},
- {V_f0, Vend},
-};
-
-/* binop t = *a op *b */
-static uchar binop11[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {Vinsx, Bop0, O_r_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsx, Bop1, O_r_hi, O_t_hi, Vend},
-};
-
-/* binop t = rp +- c */
-static uchar add0c[][VLEN] =
-{
- {V_r_lo_t, Vinsx, Bop0, O_r_lo, O_l_lo},
- {V_r_lo_f, Vamv, Bop0, Bop1},
- {Vinsx, Bop1, O_r_hi, O_l_hi},
- {Vend},
-};
-
-/* binop t = rp & c */
-static uchar and0c[][VLEN] =
-{
- {V_r_lo_t, Vinsx, Bop0, O_r_lo, O_l_lo},
- {V_r_lo_f, Vins, AMOVL, C00, O_l_lo},
- {V_r_hi_t, Vinsx, Bop1, O_r_hi, O_l_hi},
- {V_r_hi_f, Vins, AMOVL, C00, O_l_hi},
- {Vend},
-};
-
-/* binop t = rp | c */
-static uchar or0c[][VLEN] =
-{
- {V_r_lo_t, Vinsx, Bop0, O_r_lo, O_l_lo},
- {V_r_hi_t, Vinsx, Bop1, O_r_hi, O_l_hi},
- {Vend},
-};
-
-/* binop t = c - rp */
-static uchar sub10[][VLEN] =
-{
- {V_a0, Vins, AMOVL, O_l_lo, O_r0},
- {Vinsx, Bop0, O_r_lo, O_r0},
- {Vins, AMOVL, O_l_hi, O_r_lo},
- {Vinsx, Bop1, O_r_hi, O_r_lo},
- {Vspazz, V_f0, Vend},
-};
-
-/* binop t = c + *b */
-static uchar addca[][VLEN] =
-{
- {Vins, AMOVL, O_r_lo, O_t_lo},
- {V_l_lo_t, Vinsx, Bop0, O_l_lo, O_t_lo},
- {V_l_lo_f, Vamv, Bop0, Bop1},
- {Vins, AMOVL, O_r_hi, O_t_hi},
- {Vinsx, Bop1, O_l_hi, O_t_hi},
- {Vend},
-};
-
-/* binop t = c & *b */
-static uchar andca[][VLEN] =
-{
- {V_l_lo_t, Vins, AMOVL, O_r_lo, O_t_lo},
- {V_l_lo_t, Vinsx, Bop0, O_l_lo, O_t_lo},
- {V_l_lo_f, Vzero, O_t_lo},
- {V_l_hi_t, Vins, AMOVL, O_r_hi, O_t_hi},
- {V_l_hi_t, Vinsx, Bop1, O_l_hi, O_t_hi},
- {V_l_hi_f, Vzero, O_t_hi},
- {Vend},
-};
-
-/* binop t = c | *b */
-static uchar orca[][VLEN] =
-{
- {Vins, AMOVL, O_r_lo, O_t_lo},
- {V_l_lo_t, Vinsx, Bop0, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_r_hi, O_t_hi},
- {V_l_hi_t, Vinsx, Bop1, O_l_hi, O_t_hi},
- {Vend},
-};
-
-/* binop t = c - *b */
-static uchar subca[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsx, Bop0, O_r_lo, O_t_lo},
- {Vinsx, Bop1, O_r_hi, O_t_hi},
- {Vend},
-};
-
-/* binop t = *a +- c */
-static uchar addac[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {V_r_lo_t, Vinsx, Bop0, O_r_lo, O_t_lo},
- {V_r_lo_f, Vamv, Bop0, Bop1},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {Vinsx, Bop1, O_r_hi, O_t_hi},
- {Vend},
-};
-
-/* binop t = *a | c */
-static uchar orac[][VLEN] =
-{
- {Vins, AMOVL, O_l_lo, O_t_lo},
- {V_r_lo_t, Vinsx, Bop0, O_r_lo, O_t_lo},
- {Vins, AMOVL, O_l_hi, O_t_hi},
- {V_r_hi_t, Vinsx, Bop1, O_r_hi, O_t_hi},
- {Vend},
-};
-
-/* binop t = *a & c */
-static uchar andac[][VLEN] =
-{
- {V_r_lo_t, Vins, AMOVL, O_l_lo, O_t_lo},
- {V_r_lo_t, Vinsx, Bop0, O_r_lo, O_t_lo},
- {V_r_lo_f, Vzero, O_t_lo},
- {V_r_hi_t, Vins, AMOVL, O_l_hi, O_t_hi},
- {V_r_hi_t, Vinsx, Bop0, O_r_hi, O_t_hi},
- {V_r_hi_f, Vzero, O_t_hi},
- {Vend},
-};
-
-static uchar ADDargs[] = { AADDL, AADCL };
-static uchar ANDargs[] = { AANDL, AANDL };
-static uchar ORargs[] = { AORL, AORL };
-static uchar SUBargs[] = { ASUBL, ASBBL };
-static uchar XORargs[] = { AXORL, AXORL };
-
-static uchar (*ADDtab[])[VLEN] =
-{
- add0c, addca, addac,
-};
-
-static uchar (*ANDtab[])[VLEN] =
-{
- and0c, andca, andac,
-};
-
-static uchar (*ORtab[])[VLEN] =
-{
- or0c, orca, orac,
-};
-
-static uchar (*SUBtab[])[VLEN] =
-{
- add0c, subca, addac,
-};
-
-/* mul of const32 */
-static uchar mulc32[][VLEN] =
-{
- {V_a0, Vop, ONE, O_l_hi, C00},
- {V_s0, Vins, AMOVL, O_r_lo, O_r0},
- {Vins, AMULL, O_r0, O_Zop},
- {Vgo, V_p0, V_s0},
- {Vins, AMOVL, O_l_hi, O_r0},
- {Vmul, O_r_lo, O_r0},
- {Vins, AMOVL, O_r_lo, O_l_hi},
- {Vins, AMULL, O_l_hi, O_Zop},
- {Vins, AADDL, O_r0, O_l_hi},
- {V_f0, V_p0, Vend},
-};
-
-/* mul of const64 */
-static uchar mulc64[][VLEN] =
-{
- {V_a0, Vins, AMOVL, O_r_hi, O_r0},
- {Vop, OOR, O_l_hi, O_r0},
- {Vop, ONE, O_r0, C00},
- {V_s0, Vins, AMOVL, O_r_lo, O_r0},
- {Vins, AMULL, O_r0, O_Zop},
- {Vgo, V_p0, V_s0},
- {Vmul, O_r_lo, O_l_hi},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vmul, O_r_hi, O_r0},
- {Vins, AADDL, O_l_hi, O_r0},
- {Vins, AMOVL, O_r_lo, O_l_hi},
- {Vins, AMULL, O_l_hi, O_Zop},
- {Vins, AADDL, O_r0, O_l_hi},
- {V_f0, V_p0, Vend},
-};
-
-/* mul general */
-static uchar mull[][VLEN] =
-{
- {V_a0, Vins, AMOVL, O_r_hi, O_r0},
- {Vop, OOR, O_l_hi, O_r0},
- {Vop, ONE, O_r0, C00},
- {V_s0, Vins, AMOVL, O_r_lo, O_r0},
- {Vins, AMULL, O_r0, O_Zop},
- {Vgo, V_p0, V_s0},
- {Vins, AIMULL, O_r_lo, O_l_hi},
- {Vins, AMOVL, O_l_lo, O_r0},
- {Vins, AIMULL, O_r_hi, O_r0},
- {Vins, AADDL, O_l_hi, O_r0},
- {Vins, AMOVL, O_r_lo, O_l_hi},
- {Vins, AMULL, O_l_hi, O_Zop},
- {Vins, AADDL, O_r0, O_l_hi},
- {V_f0, V_p0, Vend},
-};
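
The mulc32, mulc64 and mull tables build a 64-bit product from 32-bit halves: one full multiply of the low halves supplies the low word and a carry, and the two cross products are folded into the high word (anything above bit 63 is discarded). A minimal sketch of that arithmetic, with stand-in typedefs and a hypothetical helper name:

	typedef unsigned int uint32;
	typedef unsigned long long uvlong;	/* stand-ins for the u.h typedefs */

	static void
	mul64parts(uint32 alo, uint32 ahi, uint32 blo, uint32 bhi, uint32 *lo, uint32 *hi)
	{
		uvlong t;

		t = (uvlong)alo * blo;		/* the one full multiply: low*low */
		*lo = (uint32)t;
		/* high word: carry out of low*low plus the two cross products,
		   all taken mod 2^32, as the tables arrange */
		*hi = (uint32)(t >> 32) + ahi*blo + alo*bhi;
	}
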
-
-/* cast rp l to rp t */
-static uchar castrp[][VLEN] =
-{
- {Vmv, O_l, O_t_lo},
- {VT, Vins, AMOVL, O_t_lo, O_t_hi},
- {VT, Vins, ASARL, C31, O_t_hi},
- {VF, Vzero, O_t_hi},
- {Vend},
-};
-
-/* cast rp l to addr t */
-static uchar castrpa[][VLEN] =
-{
- {VT, V_a0, Vmv, O_l, O_r0},
- {VT, Vins, AMOVL, O_r0, O_t_lo},
- {VT, Vins, ASARL, C31, O_r0},
- {VT, Vins, AMOVL, O_r0, O_t_hi},
- {VT, V_f0},
- {VF, Vmv, O_l, O_t_lo},
- {VF, Vzero, O_t_hi},
- {Vend},
-};
-
-static uchar netab0i[][VLEN] =
-{
- {Vop, ONE, O_l_lo, O_r_lo},
- {V_s0, Vop, ONE, O_l_hi, O_r_hi},
- {V_s1, Vgo, V_s2, Vgo, V_s3},
- {VF, V_p0, V_p1, VT, V_p2},
- {Vgo, V_p3},
- {VT, V_p0, V_p1, VF, V_p2},
- {Vend},
-};
-
-static uchar netabii[][VLEN] =
-{
- {V_a0, Vins, AMOVL, O_l_lo, O_r0},
- {Vop, ONE, O_r0, O_r_lo},
- {V_s0, Vins, AMOVL, O_l_hi, O_r0},
- {Vop, ONE, O_r0, O_r_hi},
- {V_s1, Vgo, V_s2, Vgo, V_s3},
- {VF, V_p0, V_p1, VT, V_p2},
- {Vgo, V_p3},
- {VT, V_p0, V_p1, VF, V_p2},
- {V_f0, Vend},
-};
-
-static uchar cmptab0i[][VLEN] =
-{
- {Vopx, Bop0, O_l_hi, O_r_hi},
- {V_s0, Vins0, AJNE},
- {V_s1, Vopx, Bop1, O_l_lo, O_r_lo},
- {V_s2, Vgo, V_s3, Vgo, V_s4},
- {VT, V_p1, V_p3},
- {VF, V_p0, V_p2},
- {Vgo, V_p4},
- {VT, V_p0, V_p2},
- {VF, V_p1, V_p3},
- {Vend},
-};
-
-static uchar cmptabii[][VLEN] =
-{
- {V_a0, Vins, AMOVL, O_l_hi, O_r0},
- {Vopx, Bop0, O_r0, O_r_hi},
- {V_s0, Vins0, AJNE},
- {V_s1, Vins, AMOVL, O_l_lo, O_r0},
- {Vopx, Bop1, O_r0, O_r_lo},
- {V_s2, Vgo, V_s3, Vgo, V_s4},
- {VT, V_p1, V_p3},
- {VF, V_p0, V_p2},
- {Vgo, V_p4},
- {VT, V_p0, V_p2},
- {VF, V_p1, V_p3},
- {V_f0, Vend},
-};
-
-static uchar (*NEtab[])[VLEN] =
-{
- netab0i, netabii,
-};
-
-static uchar (*cmptab[])[VLEN] =
-{
- cmptab0i, cmptabii,
-};
-
-static uchar GEargs[] = { OGT, OHS };
-static uchar GTargs[] = { OGT, OHI };
-static uchar HIargs[] = { OHI, OHI };
-static uchar HSargs[] = { OHI, OHS };
-
-/* Big Generator */
-static void
-biggen(Node *l, Node *r, Node *t, int true, uchar code[][VLEN], uchar *a)
-{
- int i, j, g, oc, op, lo, ro, to, xo, *xp;
- Type *lt;
- Prog *pr[VOPS];
- Node *ot, *tl, *tr, tmps[2];
- uchar *c, (*cp)[VLEN], args[VARGS];
-
- if(a != nil)
- memmove(args, a, VARGS);
-//print("biggen %d %d %d\n", args[0], args[1], args[2]);
-//if(l) prtree(l, "l");
-//if(r) prtree(r, "r");
-//if(t) prtree(t, "t");
- lo = ro = to = 0;
- cp = code;
-
- for (;;) {
- c = *cp++;
- g = 1;
- i = 0;
-//print("code %d %d %d %d %d\n", c[0], c[1], c[2], c[3], c[4]);
- for(;;) {
- switch(op = c[i]) {
- case Vgo:
- if(g)
- gbranch(OGOTO);
- i++;
- break;
-
- case Vamv:
- i += 3;
- if(i > VLEN) {
- diag(l, "bad Vop");
- return;
- }
- if(g)
- args[c[i - 1]] = args[c[i - 2]];
- break;
-
- case Vzero:
- i += 2;
- if(i > VLEN) {
- diag(l, "bad Vop");
- return;
- }
- j = i - 1;
- goto op;
-
- case Vspazz: // nasty hack to save a reg in SUB
-//print("spazz\n");
- if(g) {
-//print("hi %R lo %R t %R\n", r->right->reg, r->left->reg, tmps[0].reg);
- ot = r->right;
- r->right = r->left;
- tl = new(0, Z, Z);
- *tl = tmps[0];
- r->left = tl;
- tmps[0] = *ot;
-//print("hi %R lo %R t %R\n", r->right->reg, r->left->reg, tmps[0].reg);
- }
- i++;
- break;
-
- case Vmv:
- case Vmul:
- case Vshll:
- i += 3;
- if(i > VLEN) {
- diag(l, "bad Vop");
- return;
- }
- j = i - 2;
- goto op;
-
- case Vins0:
- i += 2;
- if(i > VLEN) {
- diag(l, "bad Vop");
- return;
- }
- gins(c[i - 1], Z, Z);
- break;
-
- case Vop:
- case Vopx:
- case Vins:
- case Vinsl:
- case Vinsr:
- case Vinsla:
- case Vinsra:
- case Vinsx:
- i += 4;
- if(i > VLEN) {
- diag(l, "bad Vop");
- return;
- }
- j = i - 2;
- goto op;
-
- op:
- if(!g)
- break;
- tl = Z;
- tr = Z;
- for(; j < i; j++) {
- switch(c[j]) {
- case C00:
- ot = nodconst(0);
- break;
- case C01:
- ot = nodconst(1);
- break;
- case C31:
- ot = nodconst(31);
- break;
- case C32:
- ot = nodconst(32);
- break;
-
- case O_l:
- case O_l_lo:
- ot = l; xp = &lo; xo = 0;
- goto op0;
- case O_l_hi:
- ot = l; xp = &lo; xo = SZ_LONG;
- goto op0;
- case O_r:
- case O_r_lo:
- ot = r; xp = &ro; xo = 0;
- goto op0;
- case O_r_hi:
- ot = r; xp = &ro; xo = SZ_LONG;
- goto op0;
- case O_t_lo:
- ot = t; xp = &to; xo = 0;
- goto op0;
- case O_t_hi:
- ot = t; xp = &to; xo = SZ_LONG;
- goto op0;
- case O_l_rp:
- ot = l;
- break;
- case O_r_rp:
- ot = r;
- break;
- case O_t_rp:
- ot = t;
- break;
- case O_r0:
- case O_r1:
- ot = &tmps[c[j] - O_r0];
- break;
- case O_Zop:
- ot = Z;
- break;
-
- op0:
- switch(ot->op) {
- case OCONST:
- if(xo)
- ot = hi64(ot);
- else
- ot = lo64(ot);
- break;
- case OREGPAIR:
- if(xo)
- ot = ot->right;
- else
- ot = ot->left;
- break;
- case OREGISTER:
- break;
- default:
- if(xo != *xp) {
- ot->xoffset += xo - *xp;
- *xp = xo;
- }
- }
- break;
-
- default:
- diag(l, "bad V_lop");
- return;
- }
- if(tl == nil)
- tl = ot;
- else
- tr = ot;
- }
- if(op == Vzero) {
- zeroregm(tl);
- break;
- }
- oc = c[i - 3];
- if(op == Vinsx || op == Vopx) {
-//print("%d -> %d\n", oc, args[oc]);
- oc = args[oc];
- }
- else {
- switch(oc) {
- case O_a0:
- case O_a1:
- oc = args[oc - O_a0];
- break;
- }
- }
- switch(op) {
- case Vmul:
- mulgen(tr->type, tl, tr);
- break;
- case Vmv:
- gmove(tl, tr);
- break;
- case Vshll:
- shiftit(tr->type, tl, tr);
- break;
- case Vop:
- case Vopx:
- gopcode(oc, types[TULONG], tl, tr);
- break;
- case Vins:
- case Vinsx:
- gins(oc, tl, tr);
- break;
- case Vinsl:
- gins(oc, tl, tr->right);
- p->from.index = tr->left->reg;
- break;
- case Vinsr:
- gins(oc, tl, tr->left);
- p->from.index = tr->right->reg;
- break;
- case Vinsla:
- gins(oc, tl, tr + 1);
- p->from.index = tr->reg;
- break;
- case Vinsra:
- gins(oc, tl, tr);
- p->from.index = (tr + 1)->reg;
- break;
- }
- break;
-
- case VT:
- g = true;
- i++;
- break;
- case VF:
- g = !true;
- i++;
- break;
-
- case V_T0: case V_T1:
- g = args[op - V_T0];
- i++;
- break;
-
- case V_F0: case V_F1:
- g = !args[op - V_F0];
- i++;
- break;
-
- case V_C0: case V_C1:
- if(g)
- args[op - V_C0] = 0;
- i++;
- break;
-
- case V_S0: case V_S1:
- if(g)
- args[op - V_S0] = 1;
- i++;
- break;
-
- case V_l_lo_f:
- g = lo64v(l) == 0;
- i++;
- break;
- case V_l_hi_f:
- g = hi64v(l) == 0;
- i++;
- break;
- case V_l_lo_t:
- g = lo64v(l) != 0;
- i++;
- break;
- case V_l_hi_t:
- g = hi64v(l) != 0;
- i++;
- break;
- case V_l_lo_u:
- g = lo64v(l) >= 0;
- i++;
- break;
- case V_l_hi_u:
- g = hi64v(l) >= 0;
- i++;
- break;
- case V_r_lo_f:
- g = lo64v(r) == 0;
- i++;
- break;
- case V_r_hi_f:
- g = hi64v(r) == 0;
- i++;
- break;
- case V_r_lo_t:
- g = lo64v(r) != 0;
- i++;
- break;
- case V_r_hi_t:
- g = hi64v(r) != 0;
- i++;
- break;
- case V_r_lo_u:
- g = lo64v(r) >= 0;
- i++;
- break;
- case V_r_hi_u:
- g = hi64v(r) >= 0;
- i++;
- break;
-
- case Vend:
- goto out;
-
- case V_a0: case V_a1:
- if(g) {
- lt = l->type;
- l->type = types[TULONG];
- regalloc(&tmps[op - V_a0], l, Z);
- l->type = lt;
- }
- i++;
- break;
-
- case V_f0: case V_f1:
- if(g)
- regfree(&tmps[op - V_f0]);
- i++;
- break;
-
- case V_p0: case V_p1: case V_p2: case V_p3: case V_p4:
- if(g)
- patch(pr[op - V_p0], pc);
- i++;
- break;
-
- case V_s0: case V_s1: case V_s2: case V_s3: case V_s4:
- if(g)
- pr[op - V_s0] = p;
- i++;
- break;
-
- default:
- diag(l, "bad biggen: %d", op);
- return;
- }
- if(i == VLEN || c[i] == 0)
- break;
- }
- }
-out:
- if(lo)
- l->xoffset -= lo;
- if(ro)
- r->xoffset -= ro;
- if(to)
- t->xoffset -= to;
-}
-
-int
-cgen64(Node *n, Node *nn)
-{
- Type *dt;
- uchar *args, (*cp)[VLEN], (**optab)[VLEN];
- int li, ri, lri, dr, si, m, op, sh, cmp, true;
- Node *c, *d, *l, *r, *t, *s, nod1, nod2, nod3, nod4, nod5;
-
- if(debug['g']) {
- prtree(nn, "cgen64 lhs");
- prtree(n, "cgen64");
- print("AX = %d\n", reg[D_AX]);
- }
- cmp = 0;
- sh = 0;
-
- switch(n->op) {
- case ONEG:
- d = regpair(nn, n);
- sugen(n->left, d, 8);
- gins(ANOTL, Z, d->right);
- gins(ANEGL, Z, d->left);
- gins(ASBBL, nodconst(-1), d->right);
- break;
-
- case OCOM:
- if(!vaddr(n->left, 0) || !vaddr(nn, 0))
- d = regpair(nn, n);
- else
- return 0;
- sugen(n->left, d, 8);
- gins(ANOTL, Z, d->left);
- gins(ANOTL, Z, d->right);
- break;
-
- case OADD:
- optab = ADDtab;
- args = ADDargs;
- goto twoop;
- case OAND:
- optab = ANDtab;
- args = ANDargs;
- goto twoop;
- case OOR:
- optab = ORtab;
- args = ORargs;
- goto twoop;
- case OSUB:
- optab = SUBtab;
- args = SUBargs;
- goto twoop;
- case OXOR:
- optab = ORtab;
- args = XORargs;
- goto twoop;
- case OASHL:
- sh = 1;
- args = nil;
- optab = shlltab;
- goto twoop;
- case OLSHR:
- sh = 1;
- args = shrlargs;
- optab = shrltab;
- goto twoop;
- case OASHR:
- sh = 1;
- args = sarlargs;
- optab = shrltab;
- goto twoop;
- case OEQ:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case ONE:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OLE:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OLT:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OGE:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OGT:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OHI:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OHS:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OLO:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
- case OLS:
- cmp = 1;
- args = nil;
- optab = nil;
- goto twoop;
-
-twoop:
- dr = nn != Z && nn->op == OREGPAIR;
- l = vfunc(n->left, nn);
- if(sh)
- r = n->right;
- else
- r = vfunc(n->right, nn);
-
- li = l->op == ONAME || l->op == OINDREG || l->op == OCONST;
- ri = r->op == ONAME || r->op == OINDREG || r->op == OCONST;
-
-#define IMM(l, r) ((l) | ((r) << 1))
-
- lri = IMM(li, ri);
-
- /* find out what is so easy about some operands */
- if(li)
- li = whatof(l, sh | cmp);
- if(ri)
- ri = whatof(r, cmp);
-
- if(sh)
- goto shift;
-
- if(cmp)
- goto cmp;
-
- /* evaluate hard subexps, stealing nn if possible. */
- switch(lri) {
- case IMM(0, 0):
- bin00:
- if(l->complex > r->complex) {
- if(dr)
- t = nn;
- else
- t = regpair(Z, n);
- sugen(l, t, 8);
- l = t;
- t = regpair(Z, n);
- sugen(r, t, 8);
- r = t;
- }
- else {
- t = regpair(Z, n);
- sugen(r, t, 8);
- r = t;
- if(dr)
- t = nn;
- else
- t = regpair(Z, n);
- sugen(l, t, 8);
- l = t;
- }
- break;
- case IMM(0, 1):
- if(dr)
- t = nn;
- else
- t = regpair(Z, n);
- sugen(l, t, 8);
- l = t;
- break;
- case IMM(1, 0):
- if(n->op == OSUB && l->op == OCONST && hi64v(l) == 0) {
- lri = IMM(0, 0);
- goto bin00;
- }
- if(dr)
- t = nn;
- else
- t = regpair(Z, n);
- sugen(r, t, 8);
- r = t;
- break;
- case IMM(1, 1):
- break;
- }
-
-#define WW(l, r) ((l) | ((r) << 2))
- d = Z;
- dt = nn->type;
- nn->type = types[TLONG];
-
- switch(lri) {
- case IMM(0, 0):
- biggen(l, r, Z, 0, binop00, args);
- break;
- case IMM(0, 1):
- switch(ri) {
- case WNONE:
- diag(r, "bad whatof\n");
- break;
- case WCONST:
- biggen(l, r, Z, 0, optab[B0c], args);
- break;
- case WHARD:
- reglcgen(&nod2, r, Z);
- r = &nod2;
- /* fall thru */
- case WADDR:
- biggen(l, r, Z, 0, binoptmp, args);
- if(ri == WHARD)
- regfree(r);
- break;
- }
- break;
- case IMM(1, 0):
- if(n->op == OSUB) {
- switch(li) {
- case WNONE:
- diag(l, "bad whatof\n");
- break;
- case WHARD:
- reglcgen(&nod2, l, Z);
- l = &nod2;
- /* fall thru */
- case WADDR:
- case WCONST:
- biggen(l, r, Z, 0, sub10, args);
- break;
- }
- if(li == WHARD)
- regfree(l);
- }
- else {
- switch(li) {
- case WNONE:
- diag(l, "bad whatof\n");
- break;
- case WCONST:
- biggen(r, l, Z, 0, optab[B0c], args);
- break;
- case WHARD:
- reglcgen(&nod2, l, Z);
- l = &nod2;
- /* fall thru */
- case WADDR:
- biggen(r, l, Z, 0, binoptmp, args);
- if(li == WHARD)
- regfree(l);
- break;
- }
- }
- break;
- case IMM(1, 1):
- switch(WW(li, ri)) {
- case WW(WCONST, WHARD):
- if(r->op == ONAME && n->op == OAND && reduxv(l))
- ri = WADDR;
- break;
- case WW(WHARD, WCONST):
- if(l->op == ONAME && n->op == OAND && reduxv(r))
- li = WADDR;
- break;
- }
- if(li == WHARD) {
- reglcgen(&nod3, l, Z);
- l = &nod3;
- }
- if(ri == WHARD) {
- reglcgen(&nod2, r, Z);
- r = &nod2;
- }
- d = regpair(nn, n);
- instpair(d, Z);
- switch(WW(li, ri)) {
- case WW(WCONST, WADDR):
- case WW(WCONST, WHARD):
- biggen(l, r, d, 0, optab[Bca], args);
- break;
-
- case WW(WADDR, WCONST):
- case WW(WHARD, WCONST):
- biggen(l, r, d, 0, optab[Bac], args);
- break;
-
- case WW(WADDR, WADDR):
- case WW(WADDR, WHARD):
- case WW(WHARD, WADDR):
- case WW(WHARD, WHARD):
- biggen(l, r, d, 0, binop11, args);
- break;
-
- default:
- diag(r, "bad whatof pair %d %d\n", li, ri);
- break;
- }
- if(li == WHARD)
- regfree(l);
- if(ri == WHARD)
- regfree(r);
- break;
- }
-
- nn->type = dt;
-
- if(d != Z)
- goto finished;
-
- switch(lri) {
- case IMM(0, 0):
- freepair(r);
- /* fall thru */;
- case IMM(0, 1):
- if(!dr)
- storepair(l, nn, 1);
- break;
- case IMM(1, 0):
- if(!dr)
- storepair(r, nn, 1);
- break;
- case IMM(1, 1):
- break;
- }
- return 1;
-
- shift:
- c = Z;
-
- /* evaluate hard subexps, stealing nn if possible. */
- /* must also secure CX. not as many optims as binop. */
- switch(lri) {
- case IMM(0, 0):
- imm00:
- if(l->complex + 1 > r->complex) {
- if(dr)
- t = nn;
- else
- t = regpair(Z, l);
- sugen(l, t, 8);
- l = t;
- t = &nod1;
- c = snarfreg(l, t, D_CX, r, &nod2);
- cgen(r, t);
- r = t;
- }
- else {
- t = &nod1;
- c = snarfreg(nn, t, D_CX, r, &nod2);
- cgen(r, t);
- r = t;
- if(dr)
- t = nn;
- else
- t = regpair(Z, l);
- sugen(l, t, 8);
- l = t;
- }
- break;
- case IMM(0, 1):
- imm01:
- if(ri != WCONST) {
- lri = IMM(0, 0);
- goto imm00;
- }
- if(dr)
- t = nn;
- else
- t = regpair(Z, n);
- sugen(l, t, 8);
- l = t;
- break;
- case IMM(1, 0):
- imm10:
- if(li != WCONST) {
- lri = IMM(0, 0);
- goto imm00;
- }
- t = &nod1;
- c = snarfreg(nn, t, D_CX, r, &nod2);
- cgen(r, t);
- r = t;
- break;
- case IMM(1, 1):
- if(ri != WCONST) {
- lri = IMM(1, 0);
- goto imm10;
- }
- if(li == WHARD) {
- lri = IMM(0, 1);
- goto imm01;
- }
- break;
- }
-
- d = Z;
-
- switch(lri) {
- case IMM(0, 0):
- biggen(l, r, Z, 0, optab[S00], args);
- break;
- case IMM(0, 1):
- switch(ri) {
- case WNONE:
- case WADDR:
- case WHARD:
- diag(r, "bad whatof\n");
- break;
- case WCONST:
- m = r->vconst & 63;
- s = nodconst(m);
- if(m < 32)
- cp = optab[Sc0];
- else if(m == 32)
- cp = optab[Sc1];
- else
- cp = optab[Sc2];
- biggen(l, s, Z, 0, cp, args);
- break;
- }
- break;
- case IMM(1, 0):
- /* left is const */
- d = regpair(nn, n);
- instpair(d, Z);
- biggen(l, r, d, 0, optab[S10], args);
- regfree(r);
- break;
- case IMM(1, 1):
- d = regpair(nn, n);
- instpair(d, Z);
- switch(WW(li, ri)) {
- case WW(WADDR, WCONST):
- m = r->vconst & 63;
- s = nodconst(m);
- if(m < 32) {
- loadpair(l, d);
- l = d;
- cp = optab[Sc0];
- }
- else if(m == 32)
- cp = optab[Sac3];
- else
- cp = optab[Sac4];
- biggen(l, s, d, 0, cp, args);
- break;
-
- default:
- diag(r, "bad whatof pair %d %d\n", li, ri);
- break;
- }
- break;
- }
-
- if(c != Z) {
- gins(AMOVL, c, r);
- regfree(c);
- }
-
- if(d != Z)
- goto finished;
-
- switch(lri) {
- case IMM(0, 0):
- regfree(r);
- /* fall thru */
- case IMM(0, 1):
- if(!dr)
- storepair(l, nn, 1);
- break;
- case IMM(1, 0):
- regfree(r);
- break;
- case IMM(1, 1):
- break;
- }
- return 1;
-
- cmp:
- op = n->op;
- /* evaluate hard subexps */
- switch(lri) {
- case IMM(0, 0):
- if(l->complex > r->complex) {
- t = regpair(Z, l);
- sugen(l, t, 8);
- l = t;
- t = regpair(Z, r);
- sugen(r, t, 8);
- r = t;
- }
- else {
- t = regpair(Z, r);
- sugen(r, t, 8);
- r = t;
- t = regpair(Z, l);
- sugen(l, t, 8);
- l = t;
- }
- break;
- case IMM(1, 0):
- t = r;
- r = l;
- l = t;
- ri = li;
- op = invrel[relindex(op)];
- /* fall thru */
- case IMM(0, 1):
- t = regpair(Z, l);
- sugen(l, t, 8);
- l = t;
- break;
- case IMM(1, 1):
- break;
- }
-
- true = 1;
- optab = cmptab;
- switch(op) {
- case OEQ:
- optab = NEtab;
- true = 0;
- break;
- case ONE:
- optab = NEtab;
- break;
- case OLE:
- args = GTargs;
- true = 0;
- break;
- case OGT:
- args = GTargs;
- break;
- case OLS:
- args = HIargs;
- true = 0;
- break;
- case OHI:
- args = HIargs;
- break;
- case OLT:
- args = GEargs;
- true = 0;
- break;
- case OGE:
- args = GEargs;
- break;
- case OLO:
- args = HSargs;
- true = 0;
- break;
- case OHS:
- args = HSargs;
- break;
- default:
- diag(n, "bad cmp\n");
- SET(optab);
- }
-
- switch(lri) {
- case IMM(0, 0):
- biggen(l, r, Z, true, optab[T0i], args);
- break;
- case IMM(0, 1):
- case IMM(1, 0):
- switch(ri) {
- case WNONE:
- diag(l, "bad whatof\n");
- break;
- case WCONST:
- biggen(l, r, Z, true, optab[T0i], args);
- break;
- case WHARD:
- reglcgen(&nod2, r, Z);
- r = &nod2;
- /* fall thru */
- case WADDR:
- biggen(l, r, Z, true, optab[T0i], args);
- if(ri == WHARD)
- regfree(r);
- break;
- }
- break;
- case IMM(1, 1):
- if(li == WHARD) {
- reglcgen(&nod3, l, Z);
- l = &nod3;
- }
- if(ri == WHARD) {
- reglcgen(&nod2, r, Z);
- r = &nod2;
- }
- biggen(l, r, Z, true, optab[Tii], args);
- if(li == WHARD)
- regfree(l);
- if(ri == WHARD)
- regfree(r);
- break;
- }
-
- switch(lri) {
- case IMM(0, 0):
- freepair(r);
- /* fall thru */;
- case IMM(0, 1):
- case IMM(1, 0):
- freepair(l);
- break;
- case IMM(1, 1):
- break;
- }
- return 1;
-
- case OASMUL:
- case OASLMUL:
- m = 0;
- goto mulop;
-
- case OMUL:
- case OLMUL:
- m = 1;
- goto mulop;
-
- mulop:
- dr = nn != Z && nn->op == OREGPAIR;
- l = vfunc(n->left, nn);
- r = vfunc(n->right, nn);
- if(r->op != OCONST) {
- if(l->complex > r->complex) {
- if(m) {
- t = l;
- l = r;
- r = t;
- }
- else if(!vaddr(l, 1)) {
- reglcgen(&nod5, l, Z);
- l = &nod5;
- evacaxdx(l);
- }
- }
- t = regpair(Z, n);
- sugen(r, t, 8);
- r = t;
- evacaxdx(r->left);
- evacaxdx(r->right);
- if(l->complex <= r->complex && !m && !vaddr(l, 1)) {
- reglcgen(&nod5, l, Z);
- l = &nod5;
- evacaxdx(l);
- }
- }
- if(dr)
- t = nn;
- else
- t = regpair(Z, n);
- c = Z;
- d = Z;
- if(!nodreg(&nod1, t->left, D_AX)) {
- if(t->left->reg != D_AX){
- t->left->reg = D_AX;
- reg[D_AX]++;
- }else if(reg[D_AX] == 0)
- fatal(Z, "vlong mul AX botch");
- }
- if(!nodreg(&nod2, t->right, D_DX)) {
- if(t->right->reg != D_DX){
- t->right->reg = D_DX;
- reg[D_DX]++;
- }else if(reg[D_DX] == 0)
- fatal(Z, "vlong mul DX botch");
- }
- if(m)
- sugen(l, t, 8);
- else
- loadpair(l, t);
- if(t->left->reg != D_AX) {
- c = &nod3;
- regsalloc(c, t->left);
- gmove(&nod1, c);
- gmove(t->left, &nod1);
- zapreg(t->left);
- }
- if(t->right->reg != D_DX) {
- d = &nod4;
- regsalloc(d, t->right);
- gmove(&nod2, d);
- gmove(t->right, &nod2);
- zapreg(t->right);
- }
- if(c != Z || d != Z) {
- s = regpair(Z, n);
- s->left = &nod1;
- s->right = &nod2;
- }
- else
- s = t;
- if(r->op == OCONST) {
- if(hi64v(r) == 0)
- biggen(s, r, Z, 0, mulc32, nil);
- else
- biggen(s, r, Z, 0, mulc64, nil);
- }
- else
- biggen(s, r, Z, 0, mull, nil);
- instpair(t, Z);
- if(c != Z) {
- gmove(&nod1, t->left);
- gmove(&nod3, &nod1);
- }
- if(d != Z) {
- gmove(&nod2, t->right);
- gmove(&nod4, &nod2);
- }
- if(r->op == OREGPAIR)
- freepair(r);
- if(!m)
- storepair(t, l, 0);
- if(l == &nod5)
- regfree(l);
- if(!dr) {
- if(nn != Z)
- storepair(t, nn, 1);
- else
- freepair(t);
- }
- return 1;
-
- case OASADD:
- args = ADDargs;
- goto vasop;
- case OASAND:
- args = ANDargs;
- goto vasop;
- case OASOR:
- args = ORargs;
- goto vasop;
- case OASSUB:
- args = SUBargs;
- goto vasop;
- case OASXOR:
- args = XORargs;
- goto vasop;
-
- vasop:
- l = n->left;
- r = n->right;
- dr = nn != Z && nn->op == OREGPAIR;
- m = 0;
- if(l->complex > r->complex) {
- if(!vaddr(l, 1)) {
- reglcgen(&nod1, l, Z);
- l = &nod1;
- }
- if(!vaddr(r, 1) || nn != Z || r->op == OCONST) {
- if(dr)
- t = nn;
- else
- t = regpair(Z, r);
- sugen(r, t, 8);
- r = t;
- m = 1;
- }
- }
- else {
- if(!vaddr(r, 1) || nn != Z || r->op == OCONST) {
- if(dr)
- t = nn;
- else
- t = regpair(Z, r);
- sugen(r, t, 8);
- r = t;
- m = 1;
- }
- if(!vaddr(l, 1)) {
- reglcgen(&nod1, l, Z);
- l = &nod1;
- }
- }
- if(nn != Z) {
- if(n->op == OASSUB)
- biggen(l, r, Z, 0, sub10, args);
- else
- biggen(r, l, Z, 0, binoptmp, args);
- storepair(r, l, 0);
- }
- else {
- if(m)
- biggen(l, r, Z, 0, binop00, args);
- else
- biggen(l, r, Z, 0, binoptmp, args);
- }
- if(l == &nod1)
- regfree(&nod1);
- if(m) {
- if(nn == Z)
- freepair(r);
- else if(!dr)
- storepair(r, nn, 1);
- }
- return 1;
-
- case OASASHL:
- args = nil;
- optab = asshlltab;
- goto assh;
- case OASLSHR:
- args = shrlargs;
- optab = asshrltab;
- goto assh;
- case OASASHR:
- args = sarlargs;
- optab = asshrltab;
- goto assh;
-
- assh:
- c = Z;
- l = n->left;
- r = n->right;
- if(r->op == OCONST) {
- m = r->vconst & 63;
- if(m < 32)
- m = SAclo;
- else if(m == 32)
- m = SAc32;
- else
- m = SAchi;
- }
- else
- m = SAgen;
- if(l->complex > r->complex) {
- if(!vaddr(l, 0)) {
- reglcgen(&nod1, l, Z);
- l = &nod1;
- }
- if(m == SAgen) {
- t = &nod2;
- if(l->reg == D_CX) {
- regalloc(t, r, Z);
- gmove(l, t);
- l->reg = t->reg;
- t->reg = D_CX;
- }
- else
- c = snarfreg(nn, t, D_CX, r, &nod3);
- cgen(r, t);
- r = t;
- }
- }
- else {
- if(m == SAgen) {
- t = &nod2;
- c = snarfreg(nn, t, D_CX, r, &nod3);
- cgen(r, t);
- r = t;
- }
- if(!vaddr(l, 0)) {
- reglcgen(&nod1, l, Z);
- l = &nod1;
- }
- }
-
- if(nn != Z) {
- m += SAdgen - SAgen;
- d = regpair(nn, n);
- instpair(d, Z);
- biggen(l, r, d, 0, optab[m], args);
- if(l == &nod1) {
- regfree(&nod1);
- l = Z;
- }
- if(r == &nod2 && c == Z) {
- regfree(&nod2);
- r = Z;
- }
- if(d != nn)
- storepair(d, nn, 1);
- }
- else
- biggen(l, r, Z, 0, optab[m], args);
-
- if(c != Z) {
- gins(AMOVL, c, r);
- regfree(c);
- }
- if(l == &nod1)
- regfree(&nod1);
- if(r == &nod2)
- regfree(&nod2);
- return 1;
-
- case OPOSTINC:
- args = ADDargs;
- cp = incdecpost;
- goto vinc;
- case OPOSTDEC:
- args = SUBargs;
- cp = incdecpost;
- goto vinc;
- case OPREINC:
- args = ADDargs;
- cp = incdecpre;
- goto vinc;
- case OPREDEC:
- args = SUBargs;
- cp = incdecpre;
- goto vinc;
-
- vinc:
- l = n->left;
- if(!vaddr(l, 1)) {
- reglcgen(&nod1, l, Z);
- l = &nod1;
- }
-
- if(nn != Z) {
- d = regpair(nn, n);
- instpair(d, Z);
- biggen(l, Z, d, 0, cp, args);
- if(l == &nod1) {
- regfree(&nod1);
- l = Z;
- }
- if(d != nn)
- storepair(d, nn, 1);
- }
- else
- biggen(l, Z, Z, 0, incdec, args);
-
- if(l == &nod1)
- regfree(&nod1);
- return 1;
-
- case OCAST:
- l = n->left;
- if(typev[l->type->etype]) {
- if(!vaddr(l, 1)) {
- if(l->complex + 1 > nn->complex) {
- d = regpair(Z, l);
- sugen(l, d, 8);
- if(!vaddr(nn, 1)) {
- reglcgen(&nod1, nn, Z);
- r = &nod1;
- }
- else
- r = nn;
- }
- else {
- if(!vaddr(nn, 1)) {
- reglcgen(&nod1, nn, Z);
- r = &nod1;
- }
- else
- r = nn;
- d = regpair(Z, l);
- sugen(l, d, 8);
- }
-// d->left->type = r->type;
- d->left->type = types[TLONG];
- gmove(d->left, r);
- freepair(d);
- }
- else {
- if(nn->op != OREGISTER && !vaddr(nn, 1)) {
- reglcgen(&nod1, nn, Z);
- r = &nod1;
- }
- else
- r = nn;
-// l->type = r->type;
- l->type = types[TLONG];
- gmove(l, r);
- }
- if(r != nn)
- regfree(r);
- }
- else {
- if(typeu[l->type->etype] || cond(l->op))
- si = TUNSIGNED;
- else
- si = TSIGNED;
- regalloc(&nod1, l, Z);
- cgen(l, &nod1);
- if(nn->op == OREGPAIR) {
- m = instpair(nn, &nod1);
- biggen(&nod1, Z, nn, si == TSIGNED, castrp, nil);
- }
- else {
- m = 0;
- if(!vaddr(nn, si != TSIGNED)) {
- dt = nn->type;
- nn->type = types[TLONG];
- reglcgen(&nod2, nn, Z);
- nn->type = dt;
- nn = &nod2;
- }
- dt = nn->type;
- nn->type = types[TLONG];
- biggen(&nod1, Z, nn, si == TSIGNED, castrpa, nil);
- nn->type = dt;
- if(nn == &nod2)
- regfree(&nod2);
- }
- if(!m)
- regfree(&nod1);
- }
- return 1;
-
- default:
- if(n->op == OREGPAIR) {
- storepair(n, nn, 1);
- return 1;
- }
- if(nn->op == OREGPAIR) {
- loadpair(n, nn);
- return 1;
- }
- return 0;
- }
-finished:
- if(d != nn)
- storepair(d, nn, 1);
- return 1;
-}
-
-void
-testv(Node *n, int true)
-{
- Type *t;
- Node *nn, nod;
-
- switch(n->op) {
- case OINDREG:
- case ONAME:
- biggen(n, Z, Z, true, testi, nil);
- break;
-
- default:
- n = vfunc(n, n);
- if(n->addable >= INDEXED) {
- t = n->type;
- n->type = types[TLONG];
- reglcgen(&nod, n, Z);
- n->type = t;
- n = &nod;
- biggen(n, Z, Z, true, testi, nil);
- if(n == &nod)
- regfree(n);
- }
- else {
- nn = regpair(Z, n);
- sugen(n, nn, 8);
- biggen(nn, Z, Z, true, testi, nil);
- freepair(nn);
- }
- }
-}
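
cgen64 drives the tables above through biggen to lower vlong operations into pairs of 32-bit instructions. For constant left shifts it splits on the count (already reduced with r->vconst & 63): below 32, exactly 32, or above 32. A minimal C sketch of the case split behind shllc0, shllc1 and shllc2 (stand-in typedef, hypothetical helper name):

	typedef unsigned int uint32;

	static void
	shl64const(uint32 *lo, uint32 *hi, int c)	/* 0 <= c < 64 */
	{
		if(c < 32) {			/* shllc0: double-register shift into hi, then shift lo */
			*hi = (*hi << c) | (c ? *lo >> (32 - c) : 0);
			*lo <<= c;
		} else if(c == 32) {		/* shllc1: MOVL lo,hi; zero lo */
			*hi = *lo;
			*lo = 0;
		} else {			/* shllc2: only the low word survives, shifted into hi */
			*hi = *lo << (c - 32);
			*lo = 0;
		}
	}
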
diff --git a/src/cmd/8c/div.c b/src/cmd/8c/div.c
deleted file mode 100644
index 14945052e..000000000
--- a/src/cmd/8c/div.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// Inferno utils/8c/div.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/div.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-/*
- * Based on: Granlund, T.; Montgomery, P.L.
- * "Division by Invariant Integers using Multiplication".
- * SIGPLAN Notices, Vol. 29, June 1994, page 61.
- */
-
-#define TN(n) ((uvlong)1 << (n))
-#define T31 TN(31)
-#define T32 TN(32)
-
-int
-multiplier(uint32 d, int p, uvlong *mp)
-{
- int l;
- uvlong mlo, mhi, tlo, thi;
-
- l = topbit(d - 1) + 1;
- mlo = (((TN(l) - d) << 32) / d) + T32;
- if(l + p == 64)
- mhi = (((TN(l) + 1 - d) << 32) / d) + T32;
- else
- mhi = (TN(32 + l) + TN(32 + l - p)) / d;
- /*assert(mlo < mhi);*/
- while(l > 0) {
- tlo = mlo >> 1;
- thi = mhi >> 1;
- if(tlo == thi)
- break;
- mlo = tlo;
- mhi = thi;
- l--;
- }
- *mp = mhi;
- return l;
-}
-
-int
-sdiv(uint32 d, uint32 *mp, int *sp)
-{
- int s;
- uvlong m;
-
- s = multiplier(d, 32 - 1, &m);
- *mp = m;
- *sp = s;
- if(m >= T31)
- return 1;
- else
- return 0;
-}
-
-int
-udiv(uint32 d, uint32 *mp, int *sp, int *pp)
-{
- int p, s;
- uvlong m;
-
- s = multiplier(d, 32, &m);
- p = 0;
- if(m >= T32) {
- while((d & 1) == 0) {
- d >>= 1;
- p++;
- }
- s = multiplier(d, 32 - p, &m);
- }
- *mp = m;
- *pp = p;
- if(m >= T32) {
- /*assert(p == 0);*/
- *sp = s - 1;
- return 1;
- }
- else {
- *sp = s;
- return 0;
- }
-}
-
-void
-sdivgen(Node *l, Node *r, Node *ax, Node *dx)
-{
- int a, s;
- uint32 m;
- vlong c;
-
- c = r->vconst;
- if(c < 0)
- c = -c;
- a = sdiv(c, &m, &s);
-//print("a=%d i=%d s=%d m=%ux\n", a, (int32)r->vconst, s, m);
- gins(AMOVL, nodconst(m), ax);
- gins(AIMULL, l, Z);
- gins(AMOVL, l, ax);
- if(a)
- gins(AADDL, ax, dx);
- gins(ASHRL, nodconst(31), ax);
- gins(ASARL, nodconst(s), dx);
- gins(AADDL, ax, dx);
- if(r->vconst < 0)
- gins(ANEGL, Z, dx);
-}
-
-void
-udivgen(Node *l, Node *r, Node *ax, Node *dx)
-{
- int a, s, t;
- uint32 m;
- Node nod;
-
- a = udiv(r->vconst, &m, &s, &t);
-//print("a=%ud i=%d p=%d s=%d m=%ux\n", a, (int32)r->vconst, t, s, m);
- if(t != 0) {
- gins(AMOVL, l, ax);
- gins(ASHRL, nodconst(t), ax);
- gins(AMOVL, nodconst(m), dx);
- gins(AMULL, dx, Z);
- }
- else if(a) {
- if(l->op != OREGISTER) {
- regalloc(&nod, l, Z);
- gins(AMOVL, l, &nod);
- l = &nod;
- }
- gins(AMOVL, nodconst(m), ax);
- gins(AMULL, l, Z);
- gins(AADDL, l, dx);
- gins(ARCRL, nodconst(1), dx);
- if(l == &nod)
- regfree(l);
- }
- else {
- gins(AMOVL, nodconst(m), ax);
- gins(AMULL, l, Z);
- }
- if(s != 0)
- gins(ASHRL, nodconst(s), dx);
-}
-
-void
-sext(Node *d, Node *s, Node *l)
-{
- if(s->reg == D_AX && !nodreg(d, Z, D_DX)) {
- reg[D_DX]++;
- gins(ACDQ, Z, Z);
- }
- else {
- regalloc(d, l, Z);
- gins(AMOVL, s, d);
- gins(ASARL, nodconst(31), d);
- }
-}
-
-void
-sdiv2(int32 c, int v, Node *l, Node *n)
-{
- Node nod;
-
- if(v > 0) {
- if(v > 1) {
- sext(&nod, n, l);
- gins(AANDL, nodconst((1 << v) - 1), &nod);
- gins(AADDL, &nod, n);
- regfree(&nod);
- }
- else {
- gins(ACMPL, n, nodconst(0x80000000));
- gins(ASBBL, nodconst(-1), n);
- }
- gins(ASARL, nodconst(v), n);
- }
- if(c < 0)
- gins(ANEGL, Z, n);
-}
-
-void
-smod2(int32 c, int v, Node *l, Node *n)
-{
- Node nod;
-
- if(c == 1) {
- zeroregm(n);
- return;
- }
-
- sext(&nod, n, l);
- if(v == 0) {
- zeroregm(n);
- gins(AXORL, &nod, n);
- gins(ASUBL, &nod, n);
- }
- else if(v > 1) {
- gins(AANDL, nodconst((1 << v) - 1), &nod);
- gins(AADDL, &nod, n);
- gins(AANDL, nodconst((1 << v) - 1), n);
- gins(ASUBL, &nod, n);
- }
- else {
- gins(AANDL, nodconst(1), n);
- gins(AXORL, &nod, n);
- gins(ASUBL, &nod, n);
- }
- regfree(&nod);
-}
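
sdiv2 above strength-reduces signed division by a constant ±2^v: negative dividends are biased by 2^v - 1 (built from the sign word) so that the arithmetic shift truncates toward zero, and the quotient is negated when the divisor is negative. A minimal sketch of the same computation (stand-in typedef, hypothetical helper name; assumes an arithmetic right shift, which is what the emitted SARL provides):

	typedef int int32;

	static int32
	sdiv2_sketch(int32 n, int v, int negdiv)	/* divide n by 2^v, v > 0; negdiv: divisor < 0 */
	{
		int32 sign;

		sign = n >> 31;				/* the sext step: 0 or -1 */
		n += sign & ((1 << v) - 1);		/* bias negatives so the shift truncates toward zero */
		n >>= v;				/* ASARL $v */
		if(negdiv)
			n = -n;				/* ANEGL when the constant divisor is negative */
		return n;
	}
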
diff --git a/src/cmd/8c/doc.go b/src/cmd/8c/doc.go
deleted file mode 100644
index 0d07db14d..000000000
--- a/src/cmd/8c/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-
-8c is a version of the Plan 9 C compiler. The original is documented at
-
- http://plan9.bell-labs.com/magic/man2html/1/8c
-
-Its target architecture is the x86, referred to by these tools for historical reasons as 386.
-
-*/
-package main
diff --git a/src/cmd/8c/gc.h b/src/cmd/8c/gc.h
deleted file mode 100644
index aa3888d73..000000000
--- a/src/cmd/8c/gc.h
+++ /dev/null
@@ -1,364 +0,0 @@
-// Inferno utils/8c/gc.h
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/gc.h
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "../cc/cc.h"
-#include "../8l/8.out.h"
-
-/*
- * 8c/386
- * Intel 386
- */
-#define SZ_CHAR 1
-#define SZ_SHORT 2
-#define SZ_INT 4
-#define SZ_LONG 4
-#define SZ_IND 4
-#define SZ_FLOAT 4
-#define SZ_VLONG 8
-#define SZ_DOUBLE 8
-#define FNX 100
-
-typedef struct Case Case;
-typedef struct C1 C1;
-typedef struct Reg Reg;
-typedef struct Rgn Rgn;
-typedef struct Renv Renv;
-
-EXTERN struct
-{
- Node* regtree;
- Node* basetree;
- short scale;
- short reg;
- short ptr;
-} idx;
-
-#define A ((Addr*)0)
-
-#define INDEXED 9
-
-#define P ((Prog*)0)
-
-struct Case
-{
- Case* link;
- int32 val;
- int32 label;
- char def;
- char isv;
-};
-#define C ((Case*)0)
-
-struct C1
-{
- int32 val;
- int32 label;
-};
-
-struct Reg
-{
- int32 pc;
- int32 rpo; /* reverse post ordering */
-
- Bits set;
- Bits use1;
- Bits use2;
-
- Bits refbehind;
- Bits refahead;
- Bits calbehind;
- Bits calahead;
- Bits regdiff;
- Bits act;
-
- int32 regu;
- int32 loop; /* could be shorter */
-
- Reg* log5;
- int32 active;
-
- Reg* p1;
- Reg* p2;
- Reg* p2link;
- Reg* s1;
- Reg* s2;
- Reg* link;
- Prog* prog;
-};
-#define R ((Reg*)0)
-
-struct Renv
-{
- int safe;
- Node base;
- Node* saved;
- Node* scope;
-};
-
-#define NRGN 600
-struct Rgn
-{
- Reg* enter;
- short cost;
- short varno;
- short regno;
-};
-
-EXTERN int32 breakpc;
-EXTERN int32 nbreak;
-EXTERN Case* cases;
-EXTERN Node constnode;
-EXTERN Node fconstnode;
-EXTERN int32 continpc;
-EXTERN int32 curarg;
-EXTERN int32 cursafe;
-EXTERN Prog* lastp;
-EXTERN int32 maxargsafe;
-EXTERN int mnstring;
-EXTERN Node* nodrat;
-EXTERN Node* nodret;
-EXTERN Node* nodsafe;
-EXTERN int32 nrathole;
-EXTERN int32 nstring;
-EXTERN Prog* p;
-EXTERN int32 pc;
-EXTERN Node regnode;
-EXTERN Node fregnode0;
-EXTERN Node fregnode1;
-EXTERN char string[NSNAME];
-EXTERN Sym* symrathole;
-EXTERN Node znode;
-EXTERN Prog zprog;
-EXTERN int reg[D_NONE];
-EXTERN int32 exregoffset;
-EXTERN int32 exfregoffset;
-
-#define BLOAD(r) band(bnot(r->refbehind), r->refahead)
-#define BSTORE(r) band(bnot(r->calbehind), r->calahead)
-#define LOAD(r) (~r->refbehind.b[z] & r->refahead.b[z])
-#define STORE(r) (~r->calbehind.b[z] & r->calahead.b[z])
-
-#define bset(a,n) ((a).b[(n)/32]&(1L<<(n)%32))
-
-#define CLOAD 5
-#define CREF 5
-#define CINF 1000
-#define LOOP 3
-
-EXTERN Rgn region[NRGN];
-EXTERN Rgn* rgp;
-EXTERN int nregion;
-EXTERN int nvar;
-
-EXTERN Bits externs;
-EXTERN Bits params;
-EXTERN Bits consts;
-EXTERN Bits addrs;
-
-EXTERN int32 regbits;
-EXTERN int32 exregbits;
-
-EXTERN int change;
-EXTERN int suppress;
-
-EXTERN Reg* firstr;
-EXTERN Reg* lastr;
-EXTERN Reg zreg;
-EXTERN Reg* freer;
-EXTERN int32* idom;
-EXTERN Reg** rpo2r;
-EXTERN int32 maxnr;
-
-extern char* anames[];
-
-/*
- * sgen.c
- */
-void codgen(Node*, Node*);
-void gen(Node*);
-void noretval(int);
-void usedset(Node*, int);
-void xcom(Node*);
-void indx(Node*);
-int bcomplex(Node*, Node*);
-Prog* gtext(Sym*, int32);
-vlong argsize(int);
-
-/*
- * cgen.c
- */
-void zeroregm(Node*);
-void cgen(Node*, Node*);
-void reglcgen(Node*, Node*, Node*);
-void lcgen(Node*, Node*);
-void bcgen(Node*, int);
-void boolgen(Node*, int, Node*);
-void sugen(Node*, Node*, int32);
-int needreg(Node*, int);
-
-/*
- * cgen64.c
- */
-int vaddr(Node*, int);
-void loadpair(Node*, Node*);
-int cgen64(Node*, Node*);
-void testv(Node*, int);
-
-/*
- * txt.c
- */
-void ginit(void);
-void gclean(void);
-void nextpc(void);
-void gargs(Node*, Node*, Node*);
-void garg1(Node*, Node*, Node*, int, Node**);
-Node* nodconst(int32);
-Node* nodfconst(double);
-int nodreg(Node*, Node*, int);
-int isreg(Node*, int);
-void regret(Node*, Node*, Type*, int);
-void regalloc(Node*, Node*, Node*);
-void regfree(Node*);
-void regialloc(Node*, Node*, Node*);
-void regsalloc(Node*, Node*);
-void regaalloc1(Node*, Node*);
-void regaalloc(Node*, Node*);
-void regind(Node*, Node*);
-void gprep(Node*, Node*);
-void naddr(Node*, Addr*);
-void gmove(Node*, Node*);
-void gins(int a, Node*, Node*);
-void fgopcode(int, Node*, Node*, int, int);
-void gopcode(int, Type*, Node*, Node*);
-int samaddr(Node*, Node*);
-void gbranch(int);
-void patch(Prog*, int32);
-int sconst(Node*);
-void gpseudo(int, Sym*, Node*);
-void gprefetch(Node*);
-void gpcdata(int, int);
-
-/*
- * swt.c
- */
-int swcmp(const void*, const void*);
-void doswit(Node*);
-void swit1(C1*, int, int32, Node*);
-void swit2(C1*, int, int32, Node*);
-void newcase(void);
-void bitload(Node*, Node*, Node*, Node*, Node*);
-void bitstore(Node*, Node*, Node*, Node*, Node*);
-int32 outstring(char*, int32);
-void nullwarn(Node*, Node*);
-void sextern(Sym*, Node*, int32, int32);
-void gextern(Sym*, Node*, int32, int32);
-void outcode(void);
-
-/*
- * list
- */
-void listinit(void);
-
-/*
- * reg.c
- */
-Reg* rega(void);
-int rcmp(const void*, const void*);
-void regopt(Prog*);
-void addmove(Reg*, int, int, int);
-Bits mkvar(Reg*, Addr*);
-void prop(Reg*, Bits, Bits);
-void loopit(Reg*, int32);
-void synch(Reg*, Bits);
-uint32 allreg(uint32, Rgn*);
-void paint1(Reg*, int);
-uint32 paint2(Reg*, int);
-void paint3(Reg*, int, int32, int);
-void addreg(Addr*, int);
-
-/*
- * peep.c
- */
-void peep(void);
-void excise(Reg*);
-Reg* uniqp(Reg*);
-Reg* uniqs(Reg*);
-int regtyp(Addr*);
-int anyvar(Addr*);
-int subprop(Reg*);
-int copyprop(Reg*);
-int copy1(Addr*, Addr*, Reg*, int);
-int copyu(Prog*, Addr*, Addr*);
-
-int copyas(Addr*, Addr*);
-int copyau(Addr*, Addr*);
-int copysub(Addr*, Addr*, Addr*, int);
-int copysub1(Prog*, Addr*, Addr*, int);
-
-int32 RtoB(int);
-int32 FtoB(int);
-int BtoR(int32);
-int BtoF(int32);
-
-#define D_HI D_NONE
-#define D_LO D_NONE
-
-/*
- * bound
- */
-void comtarg(void);
-
-/*
- * com64
- */
-int cond(int);
-int com64(Node*);
-void com64init(void);
-void bool64(Node*);
-int32 lo64v(Node*);
-int32 hi64v(Node*);
-Node* lo64(Node*);
-Node* hi64(Node*);
-
-/*
- * div/mul
- */
-void sdivgen(Node*, Node*, Node*, Node*);
-void udivgen(Node*, Node*, Node*, Node*);
-void sdiv2(int32, int, Node*, Node*);
-void smod2(int32, int, Node*, Node*);
-void mulgen(Type*, Node*, Node*);
-void genmuladd(Node*, Node*, int, Node*);
-void shiftit(Type*, Node*, Node*);
-
-/* wrecklessly steal a field */
-
-#define rplink label
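
The register allocator declared above tracks variables as Bits vectors, and the bset macro tests membership by indexing 32-bit words (word n/32, bit n%32). A standalone illustration of the same indexing, with a stand-in Bits layout since the real definition lives in cc.h:

	enum { SKETCH_BITS = 5 };		/* stand-in; the real BITS constant is in cc.h */
	typedef unsigned int uint32;
	typedef struct { uint32 b[SKETCH_BITS]; } Bits;

	/* same word/bit indexing as the bset macro */
	static int
	bset_sketch(Bits a, int n)
	{
		return (a.b[n/32] & (1U << n%32)) != 0;
	}
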
diff --git a/src/cmd/8c/list.c b/src/cmd/8c/list.c
deleted file mode 100644
index 1730eccd0..000000000
--- a/src/cmd/8c/list.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// Inferno utils/8c/list.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/list.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#define EXTERN
-#include "gc.h"
-
-void
-listinit(void)
-{
- listinit8();
-}
diff --git a/src/cmd/8c/machcap.c b/src/cmd/8c/machcap.c
deleted file mode 100644
index 61e5aad16..000000000
--- a/src/cmd/8c/machcap.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// Inferno utils/8c/machcap.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/machcap.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-int
-machcap(Node *n)
-{
-
- if(n == Z)
- return 1; /* test */
-
- switch(n->op) {
- case OMUL:
- case OLMUL:
- case OASMUL:
- case OASLMUL:
- if(typechl[n->type->etype])
- return 1;
- if(typev[n->type->etype]) {
- return 1;
- }
- break;
-
- case OCOM:
- case ONEG:
- case OADD:
- case OAND:
- case OOR:
- case OSUB:
- case OXOR:
- case OASHL:
- case OLSHR:
- case OASHR:
- if(typechlv[n->left->type->etype])
- return 1;
- break;
-
- case OCAST:
- if(typev[n->type->etype]) {
- if(typechlp[n->left->type->etype])
- return 1;
- }
- else if(!typefd[n->type->etype]) {
- if(typev[n->left->type->etype])
- return 1;
- }
- break;
-
- case OCOND:
- case OCOMMA:
- case OLIST:
- case OANDAND:
- case OOROR:
- case ONOT:
- return 1;
-
- case OASADD:
- case OASSUB:
- case OASAND:
- case OASOR:
- case OASXOR:
- return 1;
-
- case OASASHL:
- case OASASHR:
- case OASLSHR:
- return 1;
-
- case OPOSTINC:
- case OPOSTDEC:
- case OPREINC:
- case OPREDEC:
- return 1;
-
- case OEQ:
- case ONE:
- case OLE:
- case OGT:
- case OLT:
- case OGE:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- return 1;
- }
- return 0;
-}
diff --git a/src/cmd/8c/mul.c b/src/cmd/8c/mul.c
deleted file mode 100644
index 9955e762f..000000000
--- a/src/cmd/8c/mul.c
+++ /dev/null
@@ -1,458 +0,0 @@
-// Inferno utils/8c/mul.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/mul.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-typedef struct Malg Malg;
-typedef struct Mparam Mparam;
-
-struct Malg
-{
- schar vals[10];
-};
-
-struct Mparam
-{
- uint32 value;
- schar alg;
- char neg;
- char shift;
- char arg;
- schar off;
-};
-
-static Mparam multab[32];
-static int mulptr;
-
-static Malg malgs[] =
-{
- {0, 100},
- {-1, 1, 100},
- {-9, -5, -3, 3, 5, 9, 100},
- {6, 10, 12, 18, 20, 24, 36, 40, 72, 100},
- {-8, -4, -2, 2, 4, 8, 100},
-};
-
-/*
- * return position of lowest 1
- */
-int
-lowbit(uint32 v)
-{
- int s, i;
- uint32 m;
-
- s = 0;
- m = 0xFFFFFFFFUL;
- for(i = 16; i > 0; i >>= 1) {
- m >>= i;
- if((v & m) == 0) {
- v >>= i;
- s += i;
- }
- }
- return s;
-}
-
-void
-genmuladd(Node *d, Node *s, int m, Node *a)
-{
- Node nod;
-
- nod.op = OINDEX;
- nod.left = a;
- nod.right = s;
- nod.scale = m;
- nod.type = types[TIND];
- nod.xoffset = 0;
- xcom(&nod);
- gopcode(OADDR, d->type, &nod, d);
-}
-
-void
-mulparam(uint32 m, Mparam *mp)
-{
- int c, i, j, n, o, q, s;
- int bc, bi, bn, bo, bq, bs, bt;
- schar *p;
- int32 u;
- uint32 t;
-
- bc = bq = 10;
- bi = bn = bo = bs = bt = 0;
- for(i = 0; i < nelem(malgs); i++) {
- for(p = malgs[i].vals, j = 0; (o = p[j]) < 100; j++)
- for(s = 0; s < 2; s++) {
- c = 10;
- q = 10;
- u = m - o;
- if(u == 0)
- continue;
- if(s) {
- o = -o;
- if(o > 0)
- continue;
- u = -u;
- }
- n = lowbit(u);
- t = (uint32)u >> n;
- switch(i) {
- case 0:
- if(t == 1) {
- c = s + 1;
- q = 0;
- break;
- }
- switch(t) {
- case 3:
- case 5:
- case 9:
- c = s + 1;
- if(n)
- c++;
- q = 0;
- break;
- }
- if(s)
- break;
- switch(t) {
- case 15:
- case 25:
- case 27:
- case 45:
- case 81:
- c = 2;
- if(n)
- c++;
- q = 1;
- break;
- }
- break;
- case 1:
- if(t == 1) {
- c = 3;
- q = 3;
- break;
- }
- switch(t) {
- case 3:
- case 5:
- case 9:
- c = 3;
- q = 2;
- break;
- }
- break;
- case 2:
- if(t == 1) {
- c = 3;
- q = 2;
- break;
- }
- break;
- case 3:
- if(s)
- break;
- if(t == 1) {
- c = 3;
- q = 1;
- break;
- }
- break;
- case 4:
- if(t == 1) {
- c = 3;
- q = 0;
- break;
- }
- break;
- }
- if(c < bc || (c == bc && q > bq)) {
- bc = c;
- bi = i;
- bn = n;
- bo = o;
- bq = q;
- bs = s;
- bt = t;
- }
- }
- }
- mp->value = m;
- if(bc <= 3) {
- mp->alg = bi;
- mp->shift = bn;
- mp->off = bo;
- mp->neg = bs;
- mp->arg = bt;
- }
- else
- mp->alg = -1;
-}
-
-int
-m0(int a)
-{
- switch(a) {
- case -2:
- case 2:
- return 2;
- case -3:
- case 3:
- return 2;
- case -4:
- case 4:
- return 4;
- case -5:
- case 5:
- return 4;
- case 6:
- return 2;
- case -8:
- case 8:
- return 8;
- case -9:
- case 9:
- return 8;
- case 10:
- return 4;
- case 12:
- return 2;
- case 15:
- return 2;
- case 18:
- return 8;
- case 20:
- return 4;
- case 24:
- return 2;
- case 25:
- return 4;
- case 27:
- return 2;
- case 36:
- return 8;
- case 40:
- return 4;
- case 45:
- return 4;
- case 72:
- return 8;
- case 81:
- return 8;
- }
- diag(Z, "bad m0");
- return 0;
-}
-
-int
-m1(int a)
-{
- switch(a) {
- case 15:
- return 4;
- case 25:
- return 4;
- case 27:
- return 8;
- case 45:
- return 8;
- case 81:
- return 8;
- }
- diag(Z, "bad m1");
- return 0;
-}
-
-int
-m2(int a)
-{
- switch(a) {
- case 6:
- return 2;
- case 10:
- return 2;
- case 12:
- return 4;
- case 18:
- return 2;
- case 20:
- return 4;
- case 24:
- return 8;
- case 36:
- return 4;
- case 40:
- return 8;
- case 72:
- return 8;
- }
- diag(Z, "bad m2");
- return 0;
-}
-
-void
-shiftit(Type *t, Node *s, Node *d)
-{
- int32 c;
-
- c = (int32)s->vconst & 31;
- switch(c) {
- case 0:
- break;
- case 1:
- gopcode(OADD, t, d, d);
- break;
- default:
- gopcode(OASHL, t, s, d);
- }
-}
-
-static int
-mulgen1(uint32 v, Node *n)
-{
- int i, o;
- Mparam *p;
- Node nod, nods;
-
- for(i = 0; i < nelem(multab); i++) {
- p = &multab[i];
- if(p->value == v)
- goto found;
- }
-
- p = &multab[mulptr];
- if(++mulptr == nelem(multab))
- mulptr = 0;
-
- mulparam(v, p);
-
-found:
-// print("v=%.x a=%d n=%d s=%d g=%d o=%d \n", p->value, p->alg, p->neg, p->shift, p->arg, p->off);
- if(p->alg < 0)
- return 0;
-
- nods = *nodconst(p->shift);
-
- o = OADD;
- if(p->alg > 0) {
- regalloc(&nod, n, Z);
- if(p->off < 0)
- o = OSUB;
- }
-
- switch(p->alg) {
- case 0:
- switch(p->arg) {
- case 1:
- shiftit(n->type, &nods, n);
- break;
- case 15:
- case 25:
- case 27:
- case 45:
- case 81:
- genmuladd(n, n, m1(p->arg), n);
- /* fall thru */
- case 3:
- case 5:
- case 9:
- genmuladd(n, n, m0(p->arg), n);
- shiftit(n->type, &nods, n);
- break;
- default:
- goto bad;
- }
- if(p->neg == 1)
- gins(ANEGL, Z, n);
- break;
- case 1:
- switch(p->arg) {
- case 1:
- gmove(n, &nod);
- shiftit(n->type, &nods, &nod);
- break;
- case 3:
- case 5:
- case 9:
- genmuladd(&nod, n, m0(p->arg), n);
- shiftit(n->type, &nods, &nod);
- break;
- default:
- goto bad;
- }
- if(p->neg)
- gopcode(o, n->type, &nod, n);
- else {
- gopcode(o, n->type, n, &nod);
- gmove(&nod, n);
- }
- break;
- case 2:
- genmuladd(&nod, n, m0(p->off), n);
- shiftit(n->type, &nods, n);
- goto comop;
- case 3:
- genmuladd(&nod, n, m0(p->off), n);
- shiftit(n->type, &nods, n);
- genmuladd(n, &nod, m2(p->off), n);
- break;
- case 4:
- genmuladd(&nod, n, m0(p->off), nodconst(0));
- shiftit(n->type, &nods, n);
- goto comop;
- default:
- diag(Z, "bad mul alg");
- break;
- comop:
- if(p->neg) {
- gopcode(o, n->type, n, &nod);
- gmove(&nod, n);
- }
- else
- gopcode(o, n->type, &nod, n);
- }
-
- if(p->alg > 0)
- regfree(&nod);
-
- return 1;
-
-bad:
- diag(Z, "mulgen botch");
- return 1;
-}
-
-void
-mulgen(Type *t, Node *r, Node *n)
-{
- if(!mulgen1(r->vconst, n))
- gopcode(OMUL, t, r, n);
-}
diff --git a/src/cmd/8c/peep.c b/src/cmd/8c/peep.c
deleted file mode 100644
index 4f58fc05c..000000000
--- a/src/cmd/8c/peep.c
+++ /dev/null
@@ -1,807 +0,0 @@
-// Inferno utils/8c/peep.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/peep.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-static int
-needc(Prog *p)
-{
- while(p != P) {
- switch(p->as) {
- case AADCL:
- case ASBBL:
- case ARCRL:
- return 1;
- case AADDL:
- case ASUBL:
- case AJMP:
- case ARET:
- case ACALL:
- return 0;
- default:
- if(p->to.type == D_BRANCH)
- return 0;
- }
- p = p->link;
- }
- return 0;
-}
-
-void
-peep(void)
-{
- Reg *r, *r1, *r2;
- Prog *p, *p1;
- int t;
-
- /*
- * complete R structure
- */
- t = 0;
- for(r=firstr; r!=R; r=r1) {
- r1 = r->link;
- if(r1 == R)
- break;
- p = r->prog->link;
- while(p != r1->prog)
- switch(p->as) {
- default:
- r2 = rega();
- r->link = r2;
- r2->link = r1;
-
- r2->prog = p;
- r2->p1 = r;
- r->s1 = r2;
- r2->s1 = r1;
- r1->p1 = r2;
-
- r = r2;
- t++;
-
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- p = p->link;
- }
- }
-
- pc = 0; /* speculating it won't kill */
-
-loop1:
-
- t = 0;
- for(r=firstr; r!=R; r=r->link) {
- p = r->prog;
- switch(p->as) {
- case AMOVL:
- if(regtyp(&p->to))
- if(regtyp(&p->from)) {
- if(copyprop(r)) {
- excise(r);
- t++;
- }
- if(subprop(r) && copyprop(r)) {
- excise(r);
- t++;
- }
- }
- break;
-
- case AMOVBLSX:
- case AMOVBLZX:
- case AMOVWLSX:
- case AMOVWLZX:
- if(regtyp(&p->to)) {
- r1 = uniqs(r);
- if(r1 != R) {
- p1 = r1->prog;
- if(p->as == p1->as && p->to.type == p1->from.type)
- p1->as = AMOVL;
- }
- }
- break;
- case AADDL:
- case AADDW:
- if(p->from.type != D_CONST || needc(p->link))
- break;
- if(p->from.offset == -1){
- if(p->as == AADDL)
- p->as = ADECL;
- else
- p->as = ADECW;
- p->from = zprog.from;
- }
- else if(p->from.offset == 1){
- if(p->as == AADDL)
- p->as = AINCL;
- else
- p->as = AINCW;
- p->from = zprog.from;
- }
- break;
- case ASUBL:
- case ASUBW:
- if(p->from.type != D_CONST || needc(p->link))
- break;
- if(p->from.offset == -1) {
- if(p->as == ASUBL)
- p->as = AINCL;
- else
- p->as = AINCW;
- p->from = zprog.from;
- }
- else if(p->from.offset == 1){
- if(p->as == ASUBL)
- p->as = ADECL;
- else
- p->as = ADECW;
- p->from = zprog.from;
- }
- break;
- }
- }
- if(t)
- goto loop1;
-}
-
-void
-excise(Reg *r)
-{
- Prog *p;
-
- p = r->prog;
- p->as = ANOP;
- p->from = zprog.from;
- p->to = zprog.to;
-}
-
-Reg*
-uniqp(Reg *r)
-{
- Reg *r1;
-
- r1 = r->p1;
- if(r1 == R) {
- r1 = r->p2;
- if(r1 == R || r1->p2link != R)
- return R;
- } else
- if(r->p2 != R)
- return R;
- return r1;
-}
-
-Reg*
-uniqs(Reg *r)
-{
- Reg *r1;
-
- r1 = r->s1;
- if(r1 == R) {
- r1 = r->s2;
- if(r1 == R)
- return R;
- } else
- if(r->s2 != R)
- return R;
- return r1;
-}
-
-int
-regtyp(Addr *a)
-{
- int t;
-
- t = a->type;
- if(t >= D_AX && t <= D_DI)
- return 1;
- return 0;
-}
-
-/*
- * the idea is to substitute
- * one register for another
- * from one MOV to another
- * MOV a, R0
- * ADD b, R0 / no use of R1
- * MOV R0, R1
- * would be converted to
- * MOV a, R1
- * ADD b, R1
- * MOV R1, R0
- * hopefully, then the former or latter MOV
- * will be eliminated by copy propagation.
- */
-int
-subprop(Reg *r0)
-{
- Prog *p;
- Addr *v1, *v2;
- Reg *r;
- int t;
-
- p = r0->prog;
- v1 = &p->from;
- if(!regtyp(v1))
- return 0;
- v2 = &p->to;
- if(!regtyp(v2))
- return 0;
- for(r=uniqp(r0); r!=R; r=uniqp(r)) {
- if(uniqs(r) == R)
- break;
- p = r->prog;
- switch(p->as) {
- case ACALL:
- return 0;
-
- case AIMULL:
- case AIMULW:
- if(p->to.type != D_NONE)
- break;
-
- case ADIVB:
- case ADIVL:
- case ADIVW:
- case AIDIVB:
- case AIDIVL:
- case AIDIVW:
- case AIMULB:
- case AMULB:
- case AMULL:
- case AMULW:
-
- case AROLB:
- case AROLL:
- case AROLW:
- case ARORB:
- case ARORL:
- case ARORW:
- case ASALB:
- case ASALL:
- case ASALW:
- case ASARB:
- case ASARL:
- case ASARW:
- case ASHLB:
- case ASHLL:
- case ASHLW:
- case ASHRB:
- case ASHRL:
- case ASHRW:
-
- case AREP:
- case AREPN:
-
- case ACWD:
- case ACDQ:
-
- case ASTOSB:
- case ASTOSL:
- case AMOVSB:
- case AMOVSL:
- case AFSTSW:
- return 0;
-
- case AMOVL:
- if(p->to.type == v1->type)
- goto gotit;
- break;
- }
- if(copyau(&p->from, v2) ||
- copyau(&p->to, v2))
- break;
- if(copysub(&p->from, v1, v2, 0) ||
- copysub(&p->to, v1, v2, 0))
- break;
- }
- return 0;
-
-gotit:
- copysub(&p->to, v1, v2, 1);
- if(debug['P']) {
- print("gotit: %D->%D\n%P", v1, v2, r->prog);
- if(p->from.type == v2->type)
- print(" excise");
- print("\n");
- }
- for(r=uniqs(r); r!=r0; r=uniqs(r)) {
- p = r->prog;
- copysub(&p->from, v1, v2, 1);
- copysub(&p->to, v1, v2, 1);
- if(debug['P'])
- print("%P\n", r->prog);
- }
- t = v1->type;
- v1->type = v2->type;
- v2->type = t;
- if(debug['P'])
- print("%P last\n", r->prog);
- return 1;
-}
-
-/*
- * The idea is to remove redundant copies.
- * v1->v2 F=0
- * (use v2 s/v2/v1/)*
- * set v1 F=1
- * use v2 return fail
- * -----------------
- * v1->v2 F=0
- * (use v2 s/v2/v1/)*
- * set v1 F=1
- * set v2 return success
- */
-int
-copyprop(Reg *r0)
-{
- Prog *p;
- Addr *v1, *v2;
- Reg *r;
-
- p = r0->prog;
- v1 = &p->from;
- v2 = &p->to;
- if(copyas(v1, v2))
- return 1;
- for(r=firstr; r!=R; r=r->link)
- r->active = 0;
- return copy1(v1, v2, r0->s1, 0);
-}
-
-int
-copy1(Addr *v1, Addr *v2, Reg *r, int f)
-{
- int t;
- Prog *p;
-
- if(r->active) {
- if(debug['P'])
- print("act set; return 1\n");
- return 1;
- }
- r->active = 1;
- if(debug['P'])
- print("copy %D->%D f=%d\n", v1, v2, f);
- for(; r != R; r = r->s1) {
- p = r->prog;
- if(debug['P'])
- print("%P", p);
- if(!f && uniqp(r) == R) {
- f = 1;
- if(debug['P'])
- print("; merge; f=%d", f);
- }
- t = copyu(p, v2, A);
- switch(t) {
- case 2: /* rar, can't split */
- if(debug['P'])
- print("; %D rar; return 0\n", v2);
- return 0;
-
- case 3: /* set */
- if(debug['P'])
- print("; %D set; return 1\n", v2);
- return 1;
-
- case 1: /* used, substitute */
- case 4: /* use and set */
- if(f) {
- if(!debug['P'])
- return 0;
- if(t == 4)
- print("; %D used+set and f=%d; return 0\n", v2, f);
- else
- print("; %D used and f=%d; return 0\n", v2, f);
- return 0;
- }
- if(copyu(p, v2, v1)) {
- if(debug['P'])
- print("; sub fail; return 0\n");
- return 0;
- }
- if(debug['P'])
- print("; sub %D/%D", v2, v1);
- if(t == 4) {
- if(debug['P'])
- print("; %D used+set; return 1\n", v2);
- return 1;
- }
- break;
- }
- if(!f) {
- t = copyu(p, v1, A);
- if(!f && (t == 2 || t == 3 || t == 4)) {
- f = 1;
- if(debug['P'])
- print("; %D set and !f; f=%d", v1, f);
- }
- }
- if(debug['P'])
- print("\n");
- if(r->s2)
- if(!copy1(v1, v2, r->s2, f))
- return 0;
- }
- return 1;
-}
-
-/*
- * return
- * 1 if v only used (and substitute),
- * 2 if read-alter-rewrite
- * 3 if set
- * 4 if set and used
- * 0 otherwise (not touched)
- */
-int
-copyu(Prog *p, Addr *v, Addr *s)
-{
-
- switch(p->as) {
-
- default:
- if(debug['P'])
- print("unknown op %A\n", p->as);
- return 2;
-
- case ANEGB:
- case ANEGW:
- case ANEGL:
- case ANOTB:
- case ANOTW:
- case ANOTL:
- if(copyas(&p->to, v))
- return 2;
- break;
-
- case ALEAL: /* lhs addr, rhs store */
- if(copyas(&p->from, v))
- return 2;
-
-
- case ANOP: /* rhs store */
- case AMOVL:
- case AMOVBLSX:
- case AMOVBLZX:
- case AMOVWLSX:
- case AMOVWLZX:
- if(copyas(&p->to, v)) {
- if(s != A)
- return copysub(&p->from, v, s, 1);
- if(copyau(&p->from, v))
- return 4;
- return 3;
- }
- goto caseread;
-
- case AROLB:
- case AROLL:
- case AROLW:
- case ARORB:
- case ARORL:
- case ARORW:
- case ASALB:
- case ASALL:
- case ASALW:
- case ASARB:
- case ASARL:
- case ASARW:
- case ASHLB:
- case ASHLL:
- case ASHLW:
- case ASHRB:
- case ASHRL:
- case ASHRW:
- if(copyas(&p->to, v))
- return 2;
- if(copyas(&p->from, v))
- if(p->from.type == D_CX)
- return 2;
- goto caseread;
-
- case AADDB: /* rhs rar */
- case AADDL:
- case AADDW:
- case AANDB:
- case AANDL:
- case AANDW:
- case ADECL:
- case ADECW:
- case AINCL:
- case AINCW:
- case ASUBB:
- case ASUBL:
- case ASUBW:
- case AORB:
- case AORL:
- case AORW:
- case AXORB:
- case AXORL:
- case AXORW:
- case AMOVB:
- case AMOVW:
-
- case AFMOVB:
- case AFMOVBP:
- case AFMOVD:
- case AFMOVDP:
- case AFMOVF:
- case AFMOVFP:
- case AFMOVL:
- case AFMOVLP:
- case AFMOVV:
- case AFMOVVP:
- case AFMOVW:
- case AFMOVWP:
- case AFMOVX:
- case AFMOVXP:
- case AFADDDP:
- case AFADDW:
- case AFADDL:
- case AFADDF:
- case AFADDD:
- case AFMULDP:
- case AFMULW:
- case AFMULL:
- case AFMULF:
- case AFMULD:
- case AFSUBDP:
- case AFSUBW:
- case AFSUBL:
- case AFSUBF:
- case AFSUBD:
- case AFSUBRDP:
- case AFSUBRW:
- case AFSUBRL:
- case AFSUBRF:
- case AFSUBRD:
- case AFDIVDP:
- case AFDIVW:
- case AFDIVL:
- case AFDIVF:
- case AFDIVD:
- case AFDIVRDP:
- case AFDIVRW:
- case AFDIVRL:
- case AFDIVRF:
- case AFDIVRD:
- if(copyas(&p->to, v))
- return 2;
- goto caseread;
-
- case ACMPL: /* read only */
- case ACMPW:
- case ACMPB:
-
- case APREFETCHT0:
- case APREFETCHT1:
- case APREFETCHT2:
- case APREFETCHNTA:
-
-
- case AFCOMB:
- case AFCOMBP:
- case AFCOMD:
- case AFCOMDP:
- case AFCOMDPP:
- case AFCOMF:
- case AFCOMFP:
- case AFCOML:
- case AFCOMLP:
- case AFCOMW:
- case AFCOMWP:
- case AFUCOM:
- case AFUCOMP:
- case AFUCOMPP:
- caseread:
- if(s != A) {
- if(copysub(&p->from, v, s, 1))
- return 1;
- return copysub(&p->to, v, s, 1);
- }
- if(copyau(&p->from, v))
- return 1;
- if(copyau(&p->to, v))
- return 1;
- break;
-
- case AJGE: /* no reference */
- case AJNE:
- case AJLE:
- case AJEQ:
- case AJHI:
- case AJLS:
- case AJMI:
- case AJPL:
- case AJGT:
- case AJLT:
- case AJCC:
- case AJCS:
-
- case AADJSP:
- case AFLDZ:
- case AWAIT:
- break;
-
- case AIMULL:
- case AIMULW:
- if(p->to.type != D_NONE) {
- if(copyas(&p->to, v))
- return 2;
- goto caseread;
- }
-
- case ADIVB:
- case ADIVL:
- case ADIVW:
- case AIDIVB:
- case AIDIVL:
- case AIDIVW:
- case AIMULB:
- case AMULB:
- case AMULL:
- case AMULW:
-
- case ACWD:
- case ACDQ:
- if(v->type == D_AX || v->type == D_DX)
- return 2;
- goto caseread;
-
- case AREP:
- case AREPN:
- if(v->type == D_CX)
- return 2;
- goto caseread;
-
- case AMOVSB:
- case AMOVSL:
- if(v->type == D_DI || v->type == D_SI)
- return 2;
- goto caseread;
-
- case ASTOSB:
- case ASTOSL:
- if(v->type == D_AX || v->type == D_DI)
- return 2;
- goto caseread;
-
- case AFSTSW:
- if(v->type == D_AX)
- return 2;
- goto caseread;
-
- case AJMP: /* funny */
- if(s != A) {
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v))
- return 1;
- return 0;
-
- case ARET: /* funny */
- if(v->type == REGRET)
- return 2;
- if(s != A)
- return 1;
- return 3;
-
- case ACALL: /* funny */
- if(REGARG >= 0 && v->type == (uchar)REGARG)
- return 2;
-
- if(s != A) {
- if(copysub(&p->to, v, s, 1))
- return 1;
- return 0;
- }
- if(copyau(&p->to, v))
- return 4;
- return 3;
- }
- return 0;
-}
-
-/*
- * direct reference,
- * could be set/use depending on
- * semantics
- */
-int
-copyas(Addr *a, Addr *v)
-{
- if(a->type != v->type)
- return 0;
- if(regtyp(v))
- return 1;
- if(v->type == D_AUTO || v->type == D_PARAM)
- if(v->offset == a->offset)
- return 1;
- return 0;
-}
-
-/*
- * either direct or indirect
- */
-int
-copyau(Addr *a, Addr *v)
-{
-
- if(copyas(a, v))
- return 1;
- if(regtyp(v)) {
- if(a->type-D_INDIR == v->type)
- return 1;
- if(a->index == v->type)
- return 1;
- }
- return 0;
-}
-
-/*
- * substitute s for v in a
- * return failure to substitute
- */
-int
-copysub(Addr *a, Addr *v, Addr *s, int f)
-{
- int t;
-
- if(copyas(a, v)) {
- t = s->type;
- if(t >= D_AX && t <= D_DI) {
- if(f)
- a->type = t;
- }
- return 0;
- }
- if(regtyp(v)) {
- t = v->type;
- if(a->type == t+D_INDIR) {
- if(s->type == D_BP && a->index != D_NONE)
- return 1; /* can't use BP-base with index */
- if(f)
- a->type = s->type+D_INDIR;
-// return 0;
- }
- if(a->index == t) {
- if(f)
- a->index = s->type;
- return 0;
- }
- return 0;
- }
- return 0;
-}
diff --git a/src/cmd/8c/reg.c b/src/cmd/8c/reg.c
deleted file mode 100644
index ea862f388..000000000
--- a/src/cmd/8c/reg.c
+++ /dev/null
@@ -1,1438 +0,0 @@
-// Inferno utils/8c/reg.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/reg.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-static void fixjmp(Reg*);
-
-Reg*
-rega(void)
-{
- Reg *r;
-
- r = freer;
- if(r == R) {
- r = alloc(sizeof(*r));
- } else
- freer = r->link;
-
- *r = zreg;
- return r;
-}
-
-int
-rcmp(const void *a1, const void *a2)
-{
- Rgn *p1, *p2;
- int c1, c2;
-
- p1 = (Rgn*)a1;
- p2 = (Rgn*)a2;
- c1 = p2->cost;
- c2 = p1->cost;
- if(c1 -= c2)
- return c1;
- return p2->varno - p1->varno;
-}
-
-void
-regopt(Prog *p)
-{
- Reg *r, *r1, *r2;
- Prog *p1;
- int i, z;
- int32 initpc, val, npc;
- uint32 vreg;
- Bits bit;
- struct
- {
- int32 m;
- int32 c;
- Reg* p;
- } log5[6], *lp;
-
- firstr = R;
- lastr = R;
- nvar = 0;
- regbits = RtoB(D_SP) | RtoB(D_AX);
- for(z=0; z<BITS; z++) {
- externs.b[z] = 0;
- params.b[z] = 0;
- consts.b[z] = 0;
- addrs.b[z] = 0;
- }
-
- /*
- * pass 1
- * build aux data structure
- * allocate pcs
- * find use and set of variables
- */
- val = 5L * 5L * 5L * 5L * 5L;
- lp = log5;
- for(i=0; i<5; i++) {
- lp->m = val;
- lp->c = 0;
- lp->p = R;
- val /= 5L;
- lp++;
- }
- val = 0;
- for(; p != P; p = p->link) {
- switch(p->as) {
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- case AFUNCDATA:
- continue;
- }
- r = rega();
- if(firstr == R) {
- firstr = r;
- lastr = r;
- } else {
- lastr->link = r;
- r->p1 = lastr;
- lastr->s1 = r;
- lastr = r;
- }
- r->prog = p;
- r->pc = val;
- val++;
-
- lp = log5;
- for(i=0; i<5; i++) {
- lp->c--;
- if(lp->c <= 0) {
- lp->c = lp->m;
- if(lp->p != R)
- lp->p->log5 = r;
- lp->p = r;
- (lp+1)->c = 0;
- break;
- }
- lp++;
- }
-
- r1 = r->p1;
- if(r1 != R)
- switch(r1->prog->as) {
- case ARET:
- case AJMP:
- case AIRETL:
- r->p1 = R;
- r1->s1 = R;
- }
- bit = mkvar(r, &p->from);
- if(bany(&bit))
- switch(p->as) {
- /*
- * funny
- */
- case ALEAL:
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- break;
-
- /*
- * left side read
- */
- default:
- for(z=0; z<BITS; z++)
- r->use1.b[z] |= bit.b[z];
- break;
- }
-
- bit = mkvar(r, &p->to);
- if(bany(&bit))
- switch(p->as) {
- default:
- diag(Z, "reg: unknown op: %A", p->as);
- break;
-
- /*
- * right side read
- */
- case ACMPB:
- case ACMPL:
- case ACMPW:
- case APREFETCHT0:
- case APREFETCHT1:
- case APREFETCHT2:
- case APREFETCHNTA:
- for(z=0; z<BITS; z++)
- r->use2.b[z] |= bit.b[z];
- break;
-
- /*
- * right side write
- */
- case ANOP:
- case AMOVL:
- case AMOVB:
- case AMOVW:
- case AMOVBLSX:
- case AMOVBLZX:
- case AMOVWLSX:
- case AMOVWLZX:
- for(z=0; z<BITS; z++)
- r->set.b[z] |= bit.b[z];
- break;
-
- /*
- * right side read+write
- */
- case AADDB:
- case AADDL:
- case AADDW:
- case AANDB:
- case AANDL:
- case AANDW:
- case ASUBB:
- case ASUBL:
- case ASUBW:
- case AORB:
- case AORL:
- case AORW:
- case AXORB:
- case AXORL:
- case AXORW:
- case ASALB:
- case ASALL:
- case ASALW:
- case ASARB:
- case ASARL:
- case ASARW:
- case AROLB:
- case AROLL:
- case AROLW:
- case ARORB:
- case ARORL:
- case ARORW:
- case ASHLB:
- case ASHLL:
- case ASHLW:
- case ASHRB:
- case ASHRL:
- case ASHRW:
- case AIMULL:
- case AIMULW:
- case ANEGL:
- case ANOTL:
- case AADCL:
- case ASBBL:
- for(z=0; z<BITS; z++) {
- r->set.b[z] |= bit.b[z];
- r->use2.b[z] |= bit.b[z];
- }
- break;
-
- /*
- * funny
- */
- case AFMOVDP:
- case AFMOVFP:
- case AFMOVLP:
- case AFMOVVP:
- case AFMOVWP:
- case ACALL:
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- break;
- }
-
- switch(p->as) {
- case AIMULL:
- case AIMULW:
- if(p->to.type != D_NONE)
- break;
-
- case AIDIVB:
- case AIDIVL:
- case AIDIVW:
- case AIMULB:
- case ADIVB:
- case ADIVL:
- case ADIVW:
- case AMULB:
- case AMULL:
- case AMULW:
-
- case ACWD:
- case ACDQ:
- r->regu |= RtoB(D_AX) | RtoB(D_DX);
- break;
-
- case AREP:
- case AREPN:
- case ALOOP:
- case ALOOPEQ:
- case ALOOPNE:
- r->regu |= RtoB(D_CX);
- break;
-
- case AMOVSB:
- case AMOVSL:
- case AMOVSW:
- case ACMPSB:
- case ACMPSL:
- case ACMPSW:
- r->regu |= RtoB(D_SI) | RtoB(D_DI);
- break;
-
- case ASTOSB:
- case ASTOSL:
- case ASTOSW:
- case ASCASB:
- case ASCASL:
- case ASCASW:
- r->regu |= RtoB(D_AX) | RtoB(D_DI);
- break;
-
- case AINSB:
- case AINSL:
- case AINSW:
- case AOUTSB:
- case AOUTSL:
- case AOUTSW:
- r->regu |= RtoB(D_DI) | RtoB(D_DX);
- break;
-
- case AFSTSW:
- case ASAHF:
- r->regu |= RtoB(D_AX);
- break;
- }
- }
- if(firstr == R)
- return;
- initpc = pc - val;
- npc = val;
-
- /*
- * pass 2
- * turn branch references to pointers
- * build back pointers
- */
- for(r = firstr; r != R; r = r->link) {
- p = r->prog;
- if(p->to.type == D_BRANCH) {
- val = p->to.offset - initpc;
- r1 = firstr;
- while(r1 != R) {
- r2 = r1->log5;
- if(r2 != R && val >= r2->pc) {
- r1 = r2;
- continue;
- }
- if(r1->pc == val)
- break;
- r1 = r1->link;
- }
- if(r1 == R) {
- nearln = p->lineno;
- diag(Z, "ref not found\n%P", p);
- continue;
- }
- if(r1 == r) {
- nearln = p->lineno;
- diag(Z, "ref to self\n%P", p);
- continue;
- }
- r->s2 = r1;
- r->p2link = r1->p2;
- r1->p2 = r;
- }
- }
- if(debug['R']) {
- p = firstr->prog;
- print("\n%L %D\n", p->lineno, &p->from);
- }
-
- /*
- * pass 2.1
- * fix jumps
- */
- fixjmp(firstr);
-
- /*
- * pass 2.5
- * find looping structure
- */
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- change = 0;
- loopit(firstr, npc);
- if(debug['R'] && debug['v']) {
- print("\nlooping structure:\n");
- for(r = firstr; r != R; r = r->link) {
- print("%d:%P", r->loop, r->prog);
- for(z=0; z<BITS; z++)
- bit.b[z] = r->use1.b[z] |
- r->use2.b[z] |
- r->set.b[z];
- if(bany(&bit)) {
- print("\t");
- if(bany(&r->use1))
- print(" u1=%B", r->use1);
- if(bany(&r->use2))
- print(" u2=%B", r->use2);
- if(bany(&r->set))
- print(" st=%B", r->set);
- }
- print("\n");
- }
- }
-
- /*
- * pass 3
- * iterate propagating usage
- * back until flow graph is complete
- */
-loop1:
- change = 0;
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- for(r = firstr; r != R; r = r->link)
- if(r->prog->as == ARET)
- prop(r, zbits, zbits);
-loop11:
- /* pick up unreachable code */
- i = 0;
- for(r = firstr; r != R; r = r1) {
- r1 = r->link;
- if(r1 && r1->active && !r->active) {
- prop(r, zbits, zbits);
- i = 1;
- }
- }
- if(i)
- goto loop11;
- if(change)
- goto loop1;
-
-
- /*
- * pass 4
- * iterate propagating register/variable synchrony
- * forward until graph is complete
- */
-loop2:
- change = 0;
- for(r = firstr; r != R; r = r->link)
- r->active = 0;
- synch(firstr, zbits);
- if(change)
- goto loop2;
-
-
- /*
- * pass 5
- * isolate regions
- * calculate costs (paint1)
- */
- r = firstr;
- if(r) {
- for(z=0; z<BITS; z++)
- bit.b[z] = (r->refahead.b[z] | r->calahead.b[z]) &
- ~(externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z]);
- if(bany(&bit)) {
- nearln = r->prog->lineno;
- warn(Z, "used and not set: %B", bit);
- if(debug['R'] && !debug['w'])
- print("used and not set: %B\n", bit);
- }
- }
- if(debug['R'] && debug['v'])
- print("\nprop structure:\n");
- for(r = firstr; r != R; r = r->link)
- r->act = zbits;
- rgp = region;
- nregion = 0;
- for(r = firstr; r != R; r = r->link) {
- if(debug['R'] && debug['v']) {
- print("%P\t", r->prog);
- if(bany(&r->set))
- print("s:%B ", r->set);
- if(bany(&r->refahead))
- print("ra:%B ", r->refahead);
- if(bany(&r->calahead))
- print("ca:%B ", r->calahead);
- print("\n");
- }
- for(z=0; z<BITS; z++)
- bit.b[z] = r->set.b[z] &
- ~(r->refahead.b[z] | r->calahead.b[z] | addrs.b[z]);
- if(bany(&bit)) {
- nearln = r->prog->lineno;
- warn(Z, "set and not used: %B", bit);
- if(debug['R'])
- print("set and not used: %B\n", bit);
- excise(r);
- }
- for(z=0; z<BITS; z++)
- bit.b[z] = LOAD(r) & ~(r->act.b[z] | addrs.b[z]);
- while(bany(&bit)) {
- i = bnum(bit);
- rgp->enter = r;
- rgp->varno = i;
- change = 0;
- if(debug['R'] && debug['v'])
- print("\n");
- paint1(r, i);
- bit.b[i/32] &= ~(1L<<(i%32));
- if(change <= 0) {
- if(debug['R'])
- print("%L$%d: %B\n",
- r->prog->lineno, change, blsh(i));
- continue;
- }
- rgp->cost = change;
- nregion++;
- if(nregion >= NRGN) {
- fatal(Z, "too many regions");
- goto brk;
- }
- rgp++;
- }
- }
-brk:
- qsort(region, nregion, sizeof(region[0]), rcmp);
-
- /*
- * pass 6
- * determine used registers (paint2)
- * replace code (paint3)
- */
- rgp = region;
- for(i=0; i<nregion; i++) {
- bit = blsh(rgp->varno);
- vreg = paint2(rgp->enter, rgp->varno);
- vreg = allreg(vreg, rgp);
- if(debug['R']) {
- print("%L$%d %R: %B\n",
- rgp->enter->prog->lineno,
- rgp->cost,
- rgp->regno,
- bit);
- }
- if(rgp->regno != 0)
- paint3(rgp->enter, rgp->varno, vreg, rgp->regno);
- rgp++;
- }
- /*
- * pass 7
- * peep-hole on basic block
- */
- if(!debug['R'] || debug['P'])
- peep();
-
- if(debug['R'] && debug['v']) {
- print("after pass 7 (peep)\n");
- for(r=firstr; r; r=r->link)
- print("%04d %P\n", (int)r->pc, r->prog);
- print("\n");
- }
-
- /*
- * pass 8
- * recalculate pc
- */
- val = initpc;
- for(r = firstr; r != R; r = r1) {
- r->pc = val;
- p = r->prog;
- p1 = P;
- r1 = r->link;
- if(r1 != R)
- p1 = r1->prog;
- for(; p != p1; p = p->link) {
- switch(p->as) {
- default:
- val++;
- break;
-
- case ANOP:
- case ADATA:
- case AGLOBL:
- case ANAME:
- case ASIGNAME:
- case AFUNCDATA:
- break;
- }
- }
- }
- pc = val;
-
- /*
- * fix up branches
- */
- if(debug['R'])
- if(bany(&addrs))
- print("addrs: %B\n", addrs);
-
- r1 = 0; /* set */
- for(r = firstr; r != R; r = r->link) {
- p = r->prog;
- if(p->to.type == D_BRANCH) {
- p->to.offset = r->s2->pc;
- p->to.u.branch = r->s2->prog;
- }
- r1 = r;
- }
-
- /*
- * last pass
- * eliminate nops
- * free aux structures
- */
- for(p = firstr->prog; p != P; p = p->link){
- while(p->link && p->link->as == ANOP)
- p->link = p->link->link;
- }
-
- if(debug['R'] && debug['v']) {
- print("after pass 8 (fixup pc)\n");
- for(p1=firstr->prog; p1!=P; p1=p1->link)
- print("%P\n", p1);
- print("\n");
- }
-
- if(r1 != R) {
- r1->link = freer;
- freer = firstr;
- }
-}
-
-/*
- * add mov b,rn
- * just after r
- */
-void
-addmove(Reg *r, int bn, int rn, int f)
-{
- Prog *p, *p1;
- Addr *a;
- Var *v;
-
- p1 = alloc(sizeof(*p1));
- *p1 = zprog;
- p = r->prog;
-
- p1->link = p->link;
- p->link = p1;
- p1->lineno = p->lineno;
-
- v = var + bn;
-
- a = &p1->to;
- a->sym = v->sym;
- a->offset = v->offset;
- a->etype = v->etype;
- a->type = v->name;
-
- p1->as = AMOVL;
- if(v->etype == TCHAR || v->etype == TUCHAR)
- p1->as = AMOVB;
- if(v->etype == TSHORT || v->etype == TUSHORT)
- p1->as = AMOVW;
-
- p1->from.type = rn;
- if(!f) {
- p1->from = *a;
- *a = zprog.from;
- a->type = rn;
- if(v->etype == TUCHAR)
- p1->as = AMOVB;
- if(v->etype == TUSHORT)
- p1->as = AMOVW;
- }
- if(debug['R'])
- print("%P\t.a%P\n", p, p1);
-}
-
-uint32
-doregbits(int r)
-{
- uint32 b;
-
- b = 0;
- if(r >= D_INDIR)
- r -= D_INDIR;
- if(r >= D_AX && r <= D_DI)
- b |= RtoB(r);
- else
- if(r >= D_AL && r <= D_BL)
- b |= RtoB(r-D_AL+D_AX);
- else
- if(r >= D_AH && r <= D_BH)
- b |= RtoB(r-D_AH+D_AX);
- return b;
-}
-
-Bits
-mkvar(Reg *r, Addr *a)
-{
- Var *v;
- int i, t, n, et, z;
- int32 o;
- Bits bit;
- LSym *s;
-
- /*
- * mark registers used
- */
- t = a->type;
- r->regu |= doregbits(t);
- r->regu |= doregbits(a->index);
-
- switch(t) {
- default:
- goto none;
- case D_ADDR:
- a->type = a->index;
- bit = mkvar(r, a);
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- a->type = t;
- goto none;
- case D_EXTERN:
- case D_STATIC:
- case D_PARAM:
- case D_AUTO:
- n = t;
- break;
- }
- s = a->sym;
- if(s == nil)
- goto none;
- if(s->name[0] == '.')
- goto none;
- et = a->etype;
- o = a->offset;
- v = var;
- for(i=0; i<nvar; i++) {
- if(s == v->sym)
- if(n == v->name)
- if(o == v->offset)
- goto out;
- v++;
- }
- if(nvar >= NVAR)
- fatal(Z, "variable not optimized: %s", s->name);
- i = nvar;
- nvar++;
- v = &var[i];
- v->sym = s;
- v->offset = o;
- v->name = n;
- v->etype = et;
- if(debug['R'])
- print("bit=%2d et=%2d %D\n", i, et, a);
-
-out:
- bit = blsh(i);
- if(n == D_EXTERN || n == D_STATIC)
- for(z=0; z<BITS; z++)
- externs.b[z] |= bit.b[z];
- if(n == D_PARAM)
- for(z=0; z<BITS; z++)
- params.b[z] |= bit.b[z];
- if(v->etype != et || !typechlpfd[et]) /* funny punning */
- for(z=0; z<BITS; z++)
- addrs.b[z] |= bit.b[z];
- return bit;
-
-none:
- return zbits;
-}
-
-void
-prop(Reg *r, Bits ref, Bits cal)
-{
- Reg *r1, *r2;
- int z;
-
- for(r1 = r; r1 != R; r1 = r1->p1) {
- for(z=0; z<BITS; z++) {
- ref.b[z] |= r1->refahead.b[z];
- if(ref.b[z] != r1->refahead.b[z]) {
- r1->refahead.b[z] = ref.b[z];
- change++;
- }
- cal.b[z] |= r1->calahead.b[z];
- if(cal.b[z] != r1->calahead.b[z]) {
- r1->calahead.b[z] = cal.b[z];
- change++;
- }
- }
- switch(r1->prog->as) {
- case ACALL:
- for(z=0; z<BITS; z++) {
- cal.b[z] |= ref.b[z] | externs.b[z];
- ref.b[z] = 0;
- }
- break;
-
- case ATEXT:
- for(z=0; z<BITS; z++) {
- cal.b[z] = 0;
- ref.b[z] = 0;
- }
- break;
-
- case ARET:
- for(z=0; z<BITS; z++) {
- cal.b[z] = externs.b[z];
- ref.b[z] = 0;
- }
- }
- for(z=0; z<BITS; z++) {
- ref.b[z] = (ref.b[z] & ~r1->set.b[z]) |
- r1->use1.b[z] | r1->use2.b[z];
- cal.b[z] &= ~(r1->set.b[z] | r1->use1.b[z] | r1->use2.b[z]);
- r1->refbehind.b[z] = ref.b[z];
- r1->calbehind.b[z] = cal.b[z];
- }
- if(r1->active)
- break;
- r1->active = 1;
- }
- for(; r != r1; r = r->p1)
- for(r2 = r->p2; r2 != R; r2 = r2->p2link)
- prop(r2, r->refbehind, r->calbehind);
-}
-
-/*
- * find looping structure
- *
- * 1) find reverse postordering
- * 2) find approximate dominators,
- * the actual dominators if the flow graph is reducible
- * otherwise, dominators plus some other non-dominators.
- * See Matthew S. Hecht and Jeffrey D. Ullman,
- * "Analysis of a Simple Algorithm for Global Data Flow Problems",
- * Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
- * Oct. 1-3, 1973, pp. 207-217.
- * 3) find all nodes with a predecessor dominated by the current node.
- * such a node is a loop head.
- * recursively, all preds with a greater rpo number are in the loop
- */
-int32
-postorder(Reg *r, Reg **rpo2r, int32 n)
-{
- Reg *r1;
-
- r->rpo = 1;
- r1 = r->s1;
- if(r1 && !r1->rpo)
- n = postorder(r1, rpo2r, n);
- r1 = r->s2;
- if(r1 && !r1->rpo)
- n = postorder(r1, rpo2r, n);
- rpo2r[n] = r;
- n++;
- return n;
-}
-
-int32
-rpolca(int32 *idom, int32 rpo1, int32 rpo2)
-{
- int32 t;
-
- if(rpo1 == -1)
- return rpo2;
- while(rpo1 != rpo2){
- if(rpo1 > rpo2){
- t = rpo2;
- rpo2 = rpo1;
- rpo1 = t;
- }
- while(rpo1 < rpo2){
- t = idom[rpo2];
- if(t >= rpo2)
- fatal(Z, "bad idom");
- rpo2 = t;
- }
- }
- return rpo1;
-}
-
-int
-doms(int32 *idom, int32 r, int32 s)
-{
- while(s > r)
- s = idom[s];
- return s == r;
-}
-
-int
-loophead(int32 *idom, Reg *r)
-{
- int32 src;
-
- src = r->rpo;
- if(r->p1 != R && doms(idom, src, r->p1->rpo))
- return 1;
- for(r = r->p2; r != R; r = r->p2link)
- if(doms(idom, src, r->rpo))
- return 1;
- return 0;
-}
-
-void
-loopmark(Reg **rpo2r, int32 head, Reg *r)
-{
- if(r->rpo < head || r->active == head)
- return;
- r->active = head;
- r->loop += LOOP;
- if(r->p1 != R)
- loopmark(rpo2r, head, r->p1);
- for(r = r->p2; r != R; r = r->p2link)
- loopmark(rpo2r, head, r);
-}
-
-void
-loopit(Reg *r, int32 nr)
-{
- Reg *r1;
- int32 i, d, me;
-
- if(nr > maxnr) {
- rpo2r = alloc(nr * sizeof(Reg*));
- idom = alloc(nr * sizeof(int32));
- maxnr = nr;
- }
-
- d = postorder(r, rpo2r, 0);
- if(d > nr)
- fatal(Z, "too many reg nodes");
- nr = d;
- for(i = 0; i < nr / 2; i++){
- r1 = rpo2r[i];
- rpo2r[i] = rpo2r[nr - 1 - i];
- rpo2r[nr - 1 - i] = r1;
- }
- for(i = 0; i < nr; i++)
- rpo2r[i]->rpo = i;
-
- idom[0] = 0;
- for(i = 0; i < nr; i++){
- r1 = rpo2r[i];
- me = r1->rpo;
- d = -1;
- if(r1->p1 != R && r1->p1->rpo < me)
- d = r1->p1->rpo;
- for(r1 = r1->p2; r1 != nil; r1 = r1->p2link)
- if(r1->rpo < me)
- d = rpolca(idom, d, r1->rpo);
- idom[i] = d;
- }
-
- for(i = 0; i < nr; i++){
- r1 = rpo2r[i];
- r1->loop++;
- if(r1->p2 != R && loophead(idom, r1))
- loopmark(rpo2r, i, r1);
- }
-}
-
-void
-synch(Reg *r, Bits dif)
-{
- Reg *r1;
- int z;
-
- for(r1 = r; r1 != R; r1 = r1->s1) {
- for(z=0; z<BITS; z++) {
- dif.b[z] = (dif.b[z] &
- ~(~r1->refbehind.b[z] & r1->refahead.b[z])) |
- r1->set.b[z] | r1->regdiff.b[z];
- if(dif.b[z] != r1->regdiff.b[z]) {
- r1->regdiff.b[z] = dif.b[z];
- change++;
- }
- }
- if(r1->active)
- break;
- r1->active = 1;
- for(z=0; z<BITS; z++)
- dif.b[z] &= ~(~r1->calbehind.b[z] & r1->calahead.b[z]);
- if(r1->s2 != R)
- synch(r1->s2, dif);
- }
-}
-
-uint32
-allreg(uint32 b, Rgn *r)
-{
- Var *v;
- int i;
-
- v = var + r->varno;
- r->regno = 0;
- switch(v->etype) {
-
- default:
- diag(Z, "unknown etype %d/%d", bitno(b), v->etype);
- break;
-
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- case TARRAY:
- i = BtoR(~b);
- if(i && r->cost > 0) {
- r->regno = i;
- return RtoB(i);
- }
- break;
-
- case TDOUBLE:
- case TFLOAT:
- break;
- }
- return 0;
-}
-
-void
-paint1(Reg *r, int bn)
-{
- Reg *r1;
- Prog *p;
- int z;
- uint32 bb;
-
- z = bn/32;
- bb = 1L<<(bn%32);
- if(r->act.b[z] & bb)
- return;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(r1->act.b[z] & bb)
- break;
- r = r1;
- }
-
- if(LOAD(r) & ~(r->set.b[z]&~(r->use1.b[z]|r->use2.b[z])) & bb) {
- change -= CLOAD * r->loop;
- if(debug['R'] && debug['v'])
- print("%d%P\td %B $%d\n", r->loop,
- r->prog, blsh(bn), change);
- }
- for(;;) {
- r->act.b[z] |= bb;
- p = r->prog;
-
- if(r->use1.b[z] & bb) {
- change += CREF * r->loop;
- if(p->as == AFMOVL)
- if(BtoR(bb) != D_F0)
- change = -CINF;
- if(debug['R'] && debug['v'])
- print("%d%P\tu1 %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if((r->use2.b[z]|r->set.b[z]) & bb) {
- change += CREF * r->loop;
- if(p->as == AFMOVL)
- if(BtoR(bb) != D_F0)
- change = -CINF;
- if(debug['R'] && debug['v'])
- print("%d%P\tu2 %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if(STORE(r) & r->regdiff.b[z] & bb) {
- change -= CLOAD * r->loop;
- if(p->as == AFMOVL)
- if(BtoR(bb) != D_F0)
- change = -CINF;
- if(debug['R'] && debug['v'])
- print("%d%P\tst %B $%d\n", r->loop,
- p, blsh(bn), change);
- }
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- paint1(r1, bn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- paint1(r1, bn);
- r = r->s1;
- if(r == R)
- break;
- if(r->act.b[z] & bb)
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-}
-
-uint32
-regset(Reg *r, uint32 bb)
-{
- uint32 b, set;
- Addr v;
- int c;
-
- set = 0;
- v = zprog.from;
- while(b = bb & ~(bb-1)) {
- v.type = BtoR(b);
- c = copyu(r->prog, &v, A);
- if(c == 3)
- set |= b;
- bb &= ~b;
- }
- return set;
-}
-
-uint32
-reguse(Reg *r, uint32 bb)
-{
- uint32 b, set;
- Addr v;
- int c;
-
- set = 0;
- v = zprog.from;
- while(b = bb & ~(bb-1)) {
- v.type = BtoR(b);
- c = copyu(r->prog, &v, A);
- if(c == 1 || c == 2 || c == 4)
- set |= b;
- bb &= ~b;
- }
- return set;
-}
-
-uint32
-paint2(Reg *r, int bn)
-{
- Reg *r1;
- int z;
- uint32 bb, vreg, x;
-
- z = bn/32;
- bb = 1L << (bn%32);
- vreg = regbits;
- if(!(r->act.b[z] & bb))
- return vreg;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(!(r1->act.b[z] & bb))
- break;
- r = r1;
- }
- for(;;) {
- r->act.b[z] &= ~bb;
-
- vreg |= r->regu;
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- vreg |= paint2(r1, bn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- vreg |= paint2(r1, bn);
- r = r->s1;
- if(r == R)
- break;
- if(!(r->act.b[z] & bb))
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-
- bb = vreg;
- for(; r; r=r->s1) {
- x = r->regu & ~bb;
- if(x) {
- vreg |= reguse(r, x);
- bb |= regset(r, x);
- }
- }
- return vreg;
-}
-
-void
-paint3(Reg *r, int bn, int32 rb, int rn)
-{
- Reg *r1;
- Prog *p;
- int z;
- uint32 bb;
-
- z = bn/32;
- bb = 1L << (bn%32);
- if(r->act.b[z] & bb)
- return;
- for(;;) {
- if(!(r->refbehind.b[z] & bb))
- break;
- r1 = r->p1;
- if(r1 == R)
- break;
- if(!(r1->refahead.b[z] & bb))
- break;
- if(r1->act.b[z] & bb)
- break;
- r = r1;
- }
-
- if(LOAD(r) & ~(r->set.b[z] & ~(r->use1.b[z]|r->use2.b[z])) & bb)
- addmove(r, bn, rn, 0);
- for(;;) {
- r->act.b[z] |= bb;
- p = r->prog;
-
- if(r->use1.b[z] & bb) {
- if(debug['R'])
- print("%P", p);
- addreg(&p->from, rn);
- if(debug['R'])
- print("\t.c%P\n", p);
- }
- if((r->use2.b[z]|r->set.b[z]) & bb) {
- if(debug['R'])
- print("%P", p);
- addreg(&p->to, rn);
- if(debug['R'])
- print("\t.c%P\n", p);
- }
-
- if(STORE(r) & r->regdiff.b[z] & bb)
- addmove(r, bn, rn, 1);
- r->regu |= rb;
-
- if(r->refbehind.b[z] & bb)
- for(r1 = r->p2; r1 != R; r1 = r1->p2link)
- if(r1->refahead.b[z] & bb)
- paint3(r1, bn, rb, rn);
-
- if(!(r->refahead.b[z] & bb))
- break;
- r1 = r->s2;
- if(r1 != R)
- if(r1->refbehind.b[z] & bb)
- paint3(r1, bn, rb, rn);
- r = r->s1;
- if(r == R)
- break;
- if(r->act.b[z] & bb)
- break;
- if(!(r->refbehind.b[z] & bb))
- break;
- }
-}
-
-void
-addreg(Addr *a, int rn)
-{
-
- a->sym = 0;
- a->offset = 0;
- a->type = rn;
-}
-
-int32
-RtoB(int r)
-{
-
- if(r < D_AX || r > D_DI)
- return 0;
- return 1L << (r-D_AX);
-}
-
-int
-BtoR(int32 b)
-{
-
- b &= 0xffL;
- if(b == 0)
- return 0;
- return bitno(b) + D_AX;
-}
-
-/* what instruction does a JMP to p eventually land on? */
-static Reg*
-chasejmp(Reg *r, int *jmploop)
-{
- int n;
-
- n = 0;
- for(; r; r=r->s2) {
- if(r->prog->as != AJMP || r->prog->to.type != D_BRANCH)
- break;
- if(++n > 10) {
- *jmploop = 1;
- break;
- }
- }
- return r;
-}
-
-/* mark all code reachable from firstp as alive */
-static void
-mark(Reg *firstr)
-{
- Reg *r;
- Prog *p;
-
- for(r=firstr; r; r=r->link) {
- if(r->active)
- break;
- r->active = 1;
- p = r->prog;
- if(p->as != ACALL && p->to.type == D_BRANCH)
- mark(r->s2);
- if(p->as == AJMP || p->as == ARET || p->as == AUNDEF)
- break;
- }
-}
-
-/*
- * the code generator depends on being able to write out JMP
- * instructions that it can jump to now but fill in later.
- * the linker will resolve them nicely, but they make the code
- * longer and more difficult to follow during debugging.
- * remove them.
- */
-static void
-fixjmp(Reg *firstr)
-{
- int jmploop;
- Reg *r;
- Prog *p;
-
- if(debug['R'] && debug['v'])
- print("\nfixjmp\n");
-
- // pass 1: resolve jump to AJMP, mark all code as dead.
- jmploop = 0;
- for(r=firstr; r; r=r->link) {
- p = r->prog;
- if(debug['R'] && debug['v'])
- print("%04d %P\n", (int)r->pc, p);
- if(p->as != ACALL && p->to.type == D_BRANCH && r->s2 && r->s2->prog->as == AJMP) {
- r->s2 = chasejmp(r->s2, &jmploop);
- p->to.offset = r->s2->pc;
- p->to.u.branch = r->s2->prog;
- if(debug['R'] && debug['v'])
- print("->%P\n", p);
- }
- r->active = 0;
- }
- if(debug['R'] && debug['v'])
- print("\n");
-
- // pass 2: mark all reachable code alive
- mark(firstr);
-
- // pass 3: delete dead code (mostly JMPs).
- for(r=firstr; r; r=r->link) {
- if(!r->active) {
- p = r->prog;
- if(p->link == P && p->as == ARET && r->p1 && r->p1->prog->as != ARET) {
- // This is the final ARET, and the code so far doesn't have one.
- // Let it stay.
- } else {
- if(debug['R'] && debug['v'])
- print("del %04d %P\n", (int)r->pc, p);
- p->as = ANOP;
- }
- }
- }
-
- // pass 4: elide JMP to next instruction.
- // only safe if there are no jumps to JMPs anymore.
- if(!jmploop) {
- for(r=firstr; r; r=r->link) {
- p = r->prog;
- if(p->as == AJMP && p->to.type == D_BRANCH && r->s2 == r->link) {
- if(debug['R'] && debug['v'])
- print("del %04d %P\n", (int)r->pc, p);
- p->as = ANOP;
- }
- }
- }
-
- // fix back pointers.
- for(r=firstr; r; r=r->link) {
- r->p2 = R;
- r->p2link = R;
- }
- for(r=firstr; r; r=r->link) {
- if(r->s2) {
- r->p2link = r->s2->p2;
- r->s2->p2 = r;
- }
- }
-
- if(debug['R'] && debug['v']) {
- print("\n");
- for(r=firstr; r; r=r->link)
- print("%04d %P\n", (int)r->pc, r->prog);
- print("\n");
- }
-}
-
diff --git a/src/cmd/8c/sgen.c b/src/cmd/8c/sgen.c
deleted file mode 100644
index d647010ef..000000000
--- a/src/cmd/8c/sgen.c
+++ /dev/null
@@ -1,483 +0,0 @@
-// Inferno utils/8c/sgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/sgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-Prog*
-gtext(Sym *s, int32 stkoff)
-{
- int32 a;
-
- a = argsize(1);
- if((textflag & NOSPLIT) != 0 && stkoff >= 128)
- yyerror("stack frame too large for NOSPLIT function");
-
- gpseudo(ATEXT, s, nodconst(stkoff));
- p->to.type = D_CONST2;
- p->to.offset2 = a;
- return p;
-}
-
-void
-noretval(int n)
-{
-
- if(n & 1) {
- gins(ANOP, Z, Z);
- p->to.type = REGRET;
- }
- if(n & 2) {
- gins(ANOP, Z, Z);
- p->to.type = FREGRET;
- }
-}
-
-/* welcome to commute */
-static void
-commute(Node *n)
-{
- Node *l, *r;
-
- l = n->left;
- r = n->right;
- if(r->complex > l->complex) {
- n->left = r;
- n->right = l;
- }
-}
-
-void
-indexshift(Node *n)
-{
- int g;
-
- if(!typechlp[n->type->etype])
- return;
- simplifyshift(n);
- if(n->op == OASHL && n->right->op == OCONST){
- g = vconst(n->right);
- if(g >= 0 && g < 4)
- n->addable = 7;
- }
-}
-
-/*
- * calculate addressability as follows
- * NAME ==> 10/11 name+value(SB/SP)
- * REGISTER ==> 12 register
- * CONST ==> 20 $value
- * *(20) ==> 21 value
- * &(10) ==> 13 $name+value(SB)
- * &(11) ==> 1 $name+value(SP)
- * (13) + (20) ==> 13 fold constants
- * (1) + (20) ==> 1 fold constants
- * *(13) ==> 10 back to name
- * *(1) ==> 11 back to name
- *
- * (20) * (X) ==> 7 multiplier in indexing
- * (X,7) + (13,1) ==> 8 adder in indexing (addresses)
- * (8) ==> &9(OINDEX) index, almost addressable
- * 100 extern register
- *
- * calculate complexity (number of registers)
- */
-void
-xcom(Node *n)
-{
- Node *l, *r;
- int g;
-
- if(n == Z)
- return;
- l = n->left;
- r = n->right;
- n->complex = 0;
- n->addable = 0;
- switch(n->op) {
- case OCONST:
- n->addable = 20;
- break;
-
- case ONAME:
- n->addable = 10;
- if(n->class == CPARAM || n->class == CAUTO)
- n->addable = 11;
- break;
-
- case OEXREG:
- n->addable = 0;
- break;
-
- case OREGISTER:
- n->addable = 12;
- break;
-
- case OINDREG:
- n->addable = 12;
- break;
-
- case OADDR:
- xcom(l);
- if(l->addable == 10)
- n->addable = 13;
- else
- if(l->addable == 11)
- n->addable = 1;
- break;
-
- case OADD:
- xcom(l);
- xcom(r);
- if(n->type->etype != TIND)
- break;
-
- switch(r->addable) {
- case 20:
- switch(l->addable) {
- case 1:
- case 13:
- commadd:
- l->type = n->type;
- *n = *l;
- l = new(0, Z, Z);
- *l = *(n->left);
- l->xoffset += r->vconst;
- n->left = l;
- r = n->right;
- goto brk;
- }
- break;
-
- case 1:
- case 13:
- case 10:
- case 11:
- /* l is the base, r is the index */
- if(l->addable != 20)
- n->addable = 8;
- break;
- }
- switch(l->addable) {
- case 20:
- switch(r->addable) {
- case 13:
- case 1:
- r = n->left;
- l = n->right;
- n->left = l;
- n->right = r;
- goto commadd;
- }
- break;
-
- case 13:
- case 1:
- case 10:
- case 11:
- /* r is the base, l is the index */
- if(r->addable != 20)
- n->addable = 8;
- break;
- }
- if(n->addable == 8 && !side(n)) {
- indx(n);
- l = new1(OINDEX, idx.basetree, idx.regtree);
- l->scale = idx.scale;
- l->addable = 9;
- l->complex = l->right->complex;
- l->type = l->left->type;
- n->op = OADDR;
- n->left = l;
- n->right = Z;
- n->addable = 8;
- break;
- }
- break;
-
- case OINDEX:
- xcom(l);
- xcom(r);
- n->addable = 9;
- break;
-
- case OIND:
- xcom(l);
- if(l->op == OADDR) {
- l = l->left;
- l->type = n->type;
- *n = *l;
- return;
- }
- switch(l->addable) {
- case 20:
- n->addable = 21;
- break;
- case 1:
- n->addable = 11;
- break;
- case 13:
- n->addable = 10;
- break;
- }
- break;
-
- case OASHL:
- xcom(l);
- xcom(r);
- indexshift(n);
- break;
-
- case OMUL:
- case OLMUL:
- xcom(l);
- xcom(r);
- g = vlog(l);
- if(g >= 0) {
- n->left = r;
- n->right = l;
- l = r;
- r = n->right;
- }
- g = vlog(r);
- if(g >= 0) {
- n->op = OASHL;
- r->vconst = g;
- r->type = types[TINT];
- indexshift(n);
- break;
- }
-commute(n);
- break;
-
- case OASLDIV:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OASLSHR;
- r->vconst = g;
- r->type = types[TINT];
- }
- break;
-
- case OLDIV:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OLSHR;
- r->vconst = g;
- r->type = types[TINT];
- indexshift(n);
- break;
- }
- break;
-
- case OASLMOD:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OASAND;
- r->vconst--;
- }
- break;
-
- case OLMOD:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OAND;
- r->vconst--;
- }
- break;
-
- case OASMUL:
- case OASLMUL:
- xcom(l);
- xcom(r);
- g = vlog(r);
- if(g >= 0) {
- n->op = OASASHL;
- r->vconst = g;
- }
- break;
-
- case OLSHR:
- case OASHR:
- xcom(l);
- xcom(r);
- indexshift(n);
- break;
-
- default:
- if(l != Z)
- xcom(l);
- if(r != Z)
- xcom(r);
- break;
- }
-brk:
- if(n->addable >= 10)
- return;
- if(l != Z)
- n->complex = l->complex;
- if(r != Z) {
- if(r->complex == n->complex)
- n->complex = r->complex+1;
- else
- if(r->complex > n->complex)
- n->complex = r->complex;
- }
- if(n->complex == 0)
- n->complex++;
-
- if(com64(n))
- return;
-
- switch(n->op) {
-
- case OFUNC:
- n->complex = FNX;
- break;
-
- case OLMOD:
- case OMOD:
- case OLMUL:
- case OLDIV:
- case OMUL:
- case ODIV:
- case OASLMUL:
- case OASLDIV:
- case OASLMOD:
- case OASMUL:
- case OASDIV:
- case OASMOD:
- if(r->complex >= l->complex) {
- n->complex = l->complex + 3;
- if(r->complex > n->complex)
- n->complex = r->complex;
- } else {
- n->complex = r->complex + 3;
- if(l->complex > n->complex)
- n->complex = l->complex;
- }
- break;
-
- case OLSHR:
- case OASHL:
- case OASHR:
- case OASLSHR:
- case OASASHL:
- case OASASHR:
- if(r->complex >= l->complex) {
- n->complex = l->complex + 2;
- if(r->complex > n->complex)
- n->complex = r->complex;
- } else {
- n->complex = r->complex + 2;
- if(l->complex > n->complex)
- n->complex = l->complex;
- }
- break;
-
- case OADD:
- case OXOR:
- case OAND:
- case OOR:
- /*
- * immediate operators, make const on right
- */
- if(l->op == OCONST) {
- n->left = r;
- n->right = l;
- }
- break;
-
- case OEQ:
- case ONE:
- case OLE:
- case OLT:
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLO:
- case OLS:
- /*
- * compare operators, make const on left
- */
- if(r->op == OCONST) {
- n->left = r;
- n->right = l;
- n->op = invrel[relindex(n->op)];
- }
- break;
- }
-}
-
-void
-indx(Node *n)
-{
- Node *l, *r;
-
- if(debug['x'])
- prtree(n, "indx");
-
- l = n->left;
- r = n->right;
- if(l->addable == 1 || l->addable == 13 || r->complex > l->complex) {
- n->right = l;
- n->left = r;
- l = r;
- r = n->right;
- }
- if(l->addable != 7) {
- idx.regtree = l;
- idx.scale = 1;
- } else
- if(l->right->addable == 20) {
- idx.regtree = l->left;
- idx.scale = 1 << l->right->vconst;
- } else
- if(l->left->addable == 20) {
- idx.regtree = l->right;
- idx.scale = 1 << l->left->vconst;
- } else
- diag(n, "bad index");
-
- idx.basetree = r;
- if(debug['x']) {
- print("scale = %d\n", idx.scale);
- prtree(idx.regtree, "index");
- prtree(idx.basetree, "base");
- }
-}
diff --git a/src/cmd/8c/swt.c b/src/cmd/8c/swt.c
deleted file mode 100644
index d960519e3..000000000
--- a/src/cmd/8c/swt.c
+++ /dev/null
@@ -1,341 +0,0 @@
-// Inferno utils/8c/swt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/swt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-void
-swit1(C1 *q, int nc, int32 def, Node *n)
-{
- Node nreg;
-
- if(typev[n->type->etype]) {
- regsalloc(&nreg, n);
- nreg.type = types[TVLONG];
- cgen(n, &nreg);
- swit2(q, nc, def, &nreg);
- return;
- }
-
- regalloc(&nreg, n, Z);
- nreg.type = types[TLONG];
- cgen(n, &nreg);
- swit2(q, nc, def, &nreg);
- regfree(&nreg);
-}
-
-void
-swit2(C1 *q, int nc, int32 def, Node *n)
-{
- C1 *r;
- int i;
- Prog *sp;
-
- if(nc < 5) {
- for(i=0; i<nc; i++) {
- if(debug['W'])
- print("case = %.8ux\n", q->val);
- gopcode(OEQ, n->type, n, nodconst(q->val));
- patch(p, q->label);
- q++;
- }
- gbranch(OGOTO);
- patch(p, def);
- return;
- }
- i = nc / 2;
- r = q+i;
- if(debug['W'])
- print("case > %.8ux\n", r->val);
- gopcode(OGT, n->type, n, nodconst(r->val));
- sp = p;
- gbranch(OGOTO);
- p->as = AJEQ;
- patch(p, r->label);
- swit2(q, i, def, n);
-
- if(debug['W'])
- print("case < %.8ux\n", r->val);
- patch(sp, pc);
- swit2(r+1, nc-i-1, def, n);
-}
-
-void
-bitload(Node *b, Node *n1, Node *n2, Node *n3, Node *nn)
-{
- int sh;
- int32 v;
- Node *l;
-
- /*
- * n1 gets adjusted/masked value
- * n2 gets address of cell
- * n3 gets contents of cell
- */
- l = b->left;
- if(n2 != Z) {
- regalloc(n1, l, nn);
- reglcgen(n2, l, Z);
- regalloc(n3, l, Z);
- gmove(n2, n3);
- gmove(n3, n1);
- } else {
- regalloc(n1, l, nn);
- cgen(l, n1);
- }
- if(b->type->shift == 0 && typeu[b->type->etype]) {
- v = ~0 + (1L << b->type->nbits);
- gopcode(OAND, types[TLONG], nodconst(v), n1);
- } else {
- sh = 32 - b->type->shift - b->type->nbits;
- if(sh > 0)
- gopcode(OASHL, types[TLONG], nodconst(sh), n1);
- sh += b->type->shift;
- if(sh > 0)
- if(typeu[b->type->etype])
- gopcode(OLSHR, types[TLONG], nodconst(sh), n1);
- else
- gopcode(OASHR, types[TLONG], nodconst(sh), n1);
- }
-}
-
-void
-bitstore(Node *b, Node *n1, Node *n2, Node *n3, Node *nn)
-{
- int32 v;
- Node nod;
- int sh;
-
- regalloc(&nod, b->left, Z);
- v = ~0 + (1L << b->type->nbits);
- gopcode(OAND, types[TLONG], nodconst(v), n1);
- gmove(n1, &nod);
- if(nn != Z)
- gmove(n1, nn);
- sh = b->type->shift;
- if(sh > 0)
- gopcode(OASHL, types[TLONG], nodconst(sh), &nod);
- v <<= sh;
- gopcode(OAND, types[TLONG], nodconst(~v), n3);
- gopcode(OOR, types[TLONG], n3, &nod);
- gmove(&nod, n2);
-
- regfree(&nod);
- regfree(n1);
- regfree(n2);
- regfree(n3);
-}
-
-int32
-outstring(char *s, int32 n)
-{
- int32 r;
-
- if(suppress)
- return nstring;
- r = nstring;
- while(n) {
- string[mnstring] = *s++;
- mnstring++;
- nstring++;
- if(mnstring >= NSNAME) {
- gpseudo(ADATA, symstring, nodconst(0L));
- p->from.offset += nstring - NSNAME;
- p->from.scale = NSNAME;
- p->to.type = D_SCONST;
- memmove(p->to.u.sval, string, NSNAME);
- mnstring = 0;
- }
- n--;
- }
- return r;
-}
-
-void
-sextern(Sym *s, Node *a, int32 o, int32 w)
-{
- int32 e, lw;
-
- for(e=0; e<w; e+=NSNAME) {
- lw = NSNAME;
- if(w-e < lw)
- lw = w-e;
- gpseudo(ADATA, s, nodconst(0L));
- p->from.offset += o+e;
- p->from.scale = lw;
- p->to.type = D_SCONST;
- memmove(p->to.u.sval, a->cstring+e, lw);
- }
-}
-
-void
-gextern(Sym *s, Node *a, int32 o, int32 w)
-{
- if(a->op == OCONST && typev[a->type->etype]) {
- gpseudo(ADATA, s, lo64(a));
- p->from.offset += o;
- p->from.scale = 4;
- gpseudo(ADATA, s, hi64(a));
- p->from.offset += o + 4;
- p->from.scale = 4;
- return;
- }
- gpseudo(ADATA, s, a);
- p->from.offset += o;
- p->from.scale = w;
- switch(p->to.type) {
- default:
- p->to.index = p->to.type;
- p->to.type = D_ADDR;
- case D_CONST:
- case D_FCONST:
- case D_ADDR:
- break;
- }
-}
-
-void
-outcode(void)
-{
- int f;
- Biobuf b;
-
- f = open(outfile, OWRITE);
- if(f < 0) {
- diag(Z, "cannot open %s", outfile);
- return;
- }
- Binit(&b, f, OWRITE);
-
- Bprint(&b, "go object %s %s %s\n", getgoos(), getgoarch(), getgoversion());
- if(pragcgobuf.to > pragcgobuf.start) {
- Bprint(&b, "\n");
- Bprint(&b, "$$ // exports\n\n");
- Bprint(&b, "$$ // local types\n\n");
- Bprint(&b, "$$ // cgo\n");
- Bprint(&b, "%s", fmtstrflush(&pragcgobuf));
- Bprint(&b, "\n$$\n\n");
- }
- Bprint(&b, "!\n");
-
- writeobj(ctxt, &b);
- Bterm(&b);
- close(f);
- lastp = P;
-}
-
-int32
-align(int32 i, Type *t, int op, int32 *maxalign)
-{
- int32 o;
- Type *v;
- int w, packw;
-
- o = i;
- w = 1;
- packw = 0;
- switch(op) {
- default:
- diag(Z, "unknown align opcode %d", op);
- break;
-
- case Asu2: /* padding at end of a struct */
- w = *maxalign;
- if(w < 1)
- w = 1;
- if(packflg)
- packw = packflg;
- break;
-
- case Ael1: /* initial align of struct element */
- for(v=t; v->etype==TARRAY; v=v->link)
- ;
- if(v->etype == TSTRUCT || v->etype == TUNION)
- w = v->align;
- else {
- w = ewidth[v->etype];
- if(w == 8)
- w = 4;
- }
- if(w < 1 || w > SZ_LONG)
- fatal(Z, "align");
- if(packflg)
- packw = packflg;
- break;
-
- case Ael2: /* width of a struct element */
- o += t->width;
- break;
-
- case Aarg0: /* initial passbyptr argument in arg list */
- if(typesuv[t->etype]) {
- o = align(o, types[TIND], Aarg1, nil);
- o = align(o, types[TIND], Aarg2, nil);
- }
- break;
-
- case Aarg1: /* initial align of parameter */
- w = ewidth[t->etype];
- if(w <= 0 || w >= SZ_LONG) {
- w = SZ_LONG;
- break;
- }
- w = 1; /* little endian no adjustment */
- break;
-
- case Aarg2: /* width of a parameter */
- o += t->width;
- w = t->width;
- if(w > SZ_LONG)
- w = SZ_LONG;
- break;
-
- case Aaut3: /* total align of automatic */
- o = align(o, t, Ael1, nil);
- o = align(o, t, Ael2, nil);
- break;
- }
- if(packw != 0 && xround(o, w) != xround(o, packw))
- diag(Z, "#pragma pack changes offset of %T", t);
- o = xround(o, w);
- if(maxalign && *maxalign < w)
- *maxalign = w;
- if(debug['A'])
- print("align %s %d %T = %d\n", bnames[op], i, t, o);
- return o;
-}
-
-int32
-maxround(int32 max, int32 v)
-{
- v = xround(v, SZ_LONG);
- if(v > max)
- return v;
- return max;
-}
diff --git a/src/cmd/8c/txt.c b/src/cmd/8c/txt.c
deleted file mode 100644
index 7f87a0a0d..000000000
--- a/src/cmd/8c/txt.c
+++ /dev/null
@@ -1,1537 +0,0 @@
-// Inferno utils/8c/txt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/8c/txt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-
-int thechar = '8';
-char *thestring = "386";
-
-LinkArch *thelinkarch = &link386;
-
-void
-linkarchinit(void)
-{
-}
-
-void
-ginit(void)
-{
- int i;
- Type *t;
-
- exregoffset = 0;
- exfregoffset = 0;
- listinit();
- nstring = 0;
- mnstring = 0;
- nrathole = 0;
- pc = 0;
- breakpc = -1;
- continpc = -1;
- cases = C;
- lastp = P;
- tfield = types[TLONG];
-
- zprog.link = P;
- zprog.as = AGOK;
- zprog.from.type = D_NONE;
- zprog.from.index = D_NONE;
- zprog.from.scale = 0;
- zprog.to = zprog.from;
-
- regnode.op = OREGISTER;
- regnode.class = CEXREG;
- regnode.reg = REGTMP;
- regnode.complex = 0;
- regnode.addable = 11;
- regnode.type = types[TLONG];
-
- fregnode0 = regnode;
- fregnode0.reg = D_F0;
- fregnode0.type = types[TDOUBLE];
-
- fregnode1 = fregnode0;
- fregnode1.reg = D_F0+1;
-
- constnode.op = OCONST;
- constnode.class = CXXX;
- constnode.complex = 0;
- constnode.addable = 20;
- constnode.type = types[TLONG];
-
- fconstnode.op = OCONST;
- fconstnode.class = CXXX;
- fconstnode.complex = 0;
- fconstnode.addable = 20;
- fconstnode.type = types[TDOUBLE];
-
- nodsafe = new(ONAME, Z, Z);
- nodsafe->sym = slookup(".safe");
- nodsafe->type = types[TINT];
- nodsafe->etype = types[TINT]->etype;
- nodsafe->class = CAUTO;
- complex(nodsafe);
-
- t = typ(TARRAY, types[TCHAR]);
- symrathole = slookup(".rathole");
- symrathole->class = CGLOBL;
- symrathole->type = t;
-
- nodrat = new(ONAME, Z, Z);
- nodrat->sym = symrathole;
- nodrat->type = types[TIND];
- nodrat->etype = TVOID;
- nodrat->class = CGLOBL;
- complex(nodrat);
- nodrat->type = t;
-
- nodret = new(ONAME, Z, Z);
- nodret->sym = slookup(".ret");
- nodret->type = types[TIND];
- nodret->etype = TIND;
- nodret->class = CPARAM;
- nodret = new(OIND, nodret, Z);
- complex(nodret);
-
- com64init();
-
- for(i=0; i<nelem(reg); i++) {
- reg[i] = 1;
- if(i >= D_AX && i <= D_DI && i != D_SP)
- reg[i] = 0;
- }
-}
-
-void
-gclean(void)
-{
- int i;
- Sym *s;
-
- reg[D_SP]--;
- for(i=D_AX; i<=D_DI; i++)
- if(reg[i])
- diag(Z, "reg %R left allocated", i);
- while(mnstring)
- outstring("", 1L);
- symstring->type->width = nstring;
- symrathole->type->width = nrathole;
- for(i=0; i<NHASH; i++)
- for(s = hash[i]; s != S; s = s->link) {
- if(s->type == T)
- continue;
- if(s->type->width == 0)
- continue;
- if(s->class != CGLOBL && s->class != CSTATIC)
- continue;
- if(s->type == types[TENUM])
- continue;
- gpseudo(AGLOBL, s, nodconst(s->type->width));
- }
- nextpc();
- p->as = AEND;
- outcode();
-}
-
-void
-nextpc(void)
-{
- Plist *pl;
-
- p = alloc(sizeof(*p));
- *p = zprog;
- p->lineno = nearln;
- p->pc = pc;
- pc++;
- if(lastp == nil) {
- pl = linknewplist(ctxt);
- pl->firstpc = p;
- } else
- lastp->link = p;
- lastp = p;
-}
-
-void
-gargs(Node *n, Node *tn1, Node *tn2)
-{
- int32 regs;
- Node fnxargs[20], *fnxp;
-
- regs = cursafe;
-
- fnxp = fnxargs;
- garg1(n, tn1, tn2, 0, &fnxp); /* compile fns to temps */
-
- curarg = 0;
- fnxp = fnxargs;
- garg1(n, tn1, tn2, 1, &fnxp); /* compile normal args and temps */
-
- cursafe = regs;
-}
-
-int
-nareg(void)
-{
- int i, n;
-
- n = 0;
- for(i=D_AX; i<=D_DI; i++)
- if(reg[i] == 0)
- n++;
- return n;
-}
-
-void
-garg1(Node *n, Node *tn1, Node *tn2, int f, Node **fnxp)
-{
- Node nod;
-
- if(n == Z)
- return;
- if(n->op == OLIST) {
- garg1(n->left, tn1, tn2, f, fnxp);
- garg1(n->right, tn1, tn2, f, fnxp);
- return;
- }
- if(f == 0) {
- if(n->complex >= FNX) {
- regsalloc(*fnxp, n);
- nod = znode;
- nod.op = OAS;
- nod.left = *fnxp;
- nod.right = n;
- nod.type = n->type;
- cgen(&nod, Z);
- (*fnxp)++;
- }
- return;
- }
- if(typesu[n->type->etype] || typev[n->type->etype]) {
- regaalloc(tn2, n);
- if(n->complex >= FNX) {
- sugen(*fnxp, tn2, n->type->width);
- (*fnxp)++;
- } else
- sugen(n, tn2, n->type->width);
- return;
- }
- if(REGARG >= 0 && curarg == 0 && typeilp[n->type->etype]) {
- regaalloc1(tn1, n);
- if(n->complex >= FNX) {
- cgen(*fnxp, tn1);
- (*fnxp)++;
- } else
- cgen(n, tn1);
- return;
- }
- if(vconst(n) == 0) {
- regaalloc(tn2, n);
- gmove(n, tn2);
- return;
- }
- regalloc(tn1, n, Z);
- if(n->complex >= FNX) {
- cgen(*fnxp, tn1);
- (*fnxp)++;
- } else
- cgen(n, tn1);
- regaalloc(tn2, n);
- gmove(tn1, tn2);
- regfree(tn1);
-}
-
-Node*
-nodconst(int32 v)
-{
- constnode.vconst = v;
- return &constnode;
-}
-
-Node*
-nodfconst(double d)
-{
- fconstnode.fconst = d;
- return &fconstnode;
-}
-
-int
-isreg(Node *n, int r)
-{
-
- if(n->op == OREGISTER)
- if(n->reg == r)
- return 1;
- return 0;
-}
-
-int
-nodreg(Node *n, Node *nn, int r)
-{
-
- *n = regnode;
- n->reg = r;
- if(reg[r] == 0)
- return 0;
- if(nn != Z) {
- n->type = nn->type;
- n->lineno = nn->lineno;
- if(nn->op == OREGISTER)
- if(nn->reg == r)
- return 0;
- }
- return 1;
-}
-
-void
-regret(Node *n, Node *nn, Type *t, int mode)
-{
- int r;
-
- if(mode == 0 || hasdotdotdot(t) || nn->type->width == 0) {
- r = REGRET;
- if(typefd[nn->type->etype])
- r = FREGRET;
- nodreg(n, nn, r);
- reg[r]++;
- return;
- }
-
- if(mode == 1) {
- // fetch returned value after call.
- // already called gargs, so curarg is set.
- curarg = (curarg+3) & ~3;
- regaalloc(n, nn);
- return;
- }
-
- if(mode == 2) {
- // store value to be returned.
- // must compute arg offset.
- if(t->etype != TFUNC)
- fatal(Z, "bad regret func %T", t);
- *n = *nn;
- n->op = ONAME;
- n->class = CPARAM;
- n->sym = slookup(".retx");
- n->complex = 0;
- n->addable = 20;
- n->xoffset = argsize(0);
- return;
- }
-
- fatal(Z, "bad regret");
-}
-
-void
-regalloc(Node *n, Node *tn, Node *o)
-{
- int i;
-
- switch(tn->type->etype) {
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- if(o != Z && o->op == OREGISTER) {
- i = o->reg;
- if(i >= D_AX && i <= D_DI)
- goto out;
- }
- for(i=D_AX; i<=D_DI; i++)
- if(reg[i] == 0)
- goto out;
- diag(tn, "out of fixed registers");
- goto err;
-
- case TFLOAT:
- case TDOUBLE:
- case TVLONG:
- i = D_F0;
- goto out;
- }
- diag(tn, "unknown type in regalloc: %T", tn->type);
-err:
- i = 0;
-out:
- if(i)
- reg[i]++;
- nodreg(n, tn, i);
-}
-
-void
-regialloc(Node *n, Node *tn, Node *o)
-{
- Node nod;
-
- nod = *tn;
- nod.type = types[TIND];
- regalloc(n, &nod, o);
-}
-
-void
-regfree(Node *n)
-{
- int i;
-
- i = 0;
- if(n->op != OREGISTER && n->op != OINDREG)
- goto err;
- i = n->reg;
- if(i < 0 || i >= nelem(reg))
- goto err;
- if(reg[i] <= 0)
- goto err;
- reg[i]--;
- return;
-err:
- diag(n, "error in regfree: %R", i);
-}
-
-void
-regsalloc(Node *n, Node *nn)
-{
- cursafe = align(cursafe, nn->type, Aaut3, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
- *n = *nodsafe;
- n->xoffset = -(stkoff + cursafe);
- n->type = nn->type;
- n->etype = nn->type->etype;
- n->lineno = nn->lineno;
-}
-
-void
-regaalloc1(Node *n, Node *nn)
-{
- if(REGARG < 0) {
- fatal(n, "regaalloc1 and REGARG<0");
- return;
- }
- nodreg(n, nn, REGARG);
- reg[REGARG]++;
- curarg = align(curarg, nn->type, Aarg1, nil);
- curarg = align(curarg, nn->type, Aarg2, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
-}
-
-void
-regaalloc(Node *n, Node *nn)
-{
- curarg = align(curarg, nn->type, Aarg1, nil);
- *n = *nn;
- n->op = OINDREG;
- n->reg = REGSP;
- n->xoffset = curarg;
- n->complex = 0;
- n->addable = 20;
- curarg = align(curarg, nn->type, Aarg2, nil);
- maxargsafe = maxround(maxargsafe, cursafe+curarg);
-}
-
-void
-regind(Node *n, Node *nn)
-{
-
- if(n->op != OREGISTER) {
- diag(n, "regind not OREGISTER");
- return;
- }
- n->op = OINDREG;
- n->type = nn->type;
-}
-
-void
-naddr(Node *n, Addr *a)
-{
- int32 v;
-
- a->type = D_NONE;
- if(n == Z)
- return;
- switch(n->op) {
- default:
- bad:
- diag(n, "bad in naddr: %O %D", n->op, a);
- break;
-
- case OREGISTER:
- a->type = n->reg;
- a->sym = nil;
- break;
-
- case OEXREG:
- a->type = D_INDIR + D_TLS;
- a->offset = n->reg - 1;
- break;
-
- case OIND:
- naddr(n->left, a);
- if(a->type >= D_AX && a->type <= D_DI)
- a->type += D_INDIR;
- else
- if(a->type == D_CONST)
- a->type = D_NONE+D_INDIR;
- else
- if(a->type == D_ADDR) {
- a->type = a->index;
- a->index = D_NONE;
- } else
- goto bad;
- break;
-
- case OINDEX:
- a->type = idx.ptr;
- if(n->left->op == OADDR || n->left->op == OCONST)
- naddr(n->left, a);
- if(a->type >= D_AX && a->type <= D_DI)
- a->type += D_INDIR;
- else
- if(a->type == D_CONST)
- a->type = D_NONE+D_INDIR;
- else
- if(a->type == D_ADDR) {
- a->type = a->index;
- a->index = D_NONE;
- } else
- goto bad;
- a->index = idx.reg;
- a->scale = n->scale;
- a->offset += n->xoffset;
- break;
-
- case OINDREG:
- a->type = n->reg+D_INDIR;
- a->sym = nil;
- a->offset = n->xoffset;
- break;
-
- case ONAME:
- a->etype = n->etype;
- a->type = D_STATIC;
- a->sym = linksym(n->sym);
- a->offset = n->xoffset;
- if(n->class == CSTATIC)
- break;
- if(n->class == CEXTERN || n->class == CGLOBL) {
- a->type = D_EXTERN;
- break;
- }
- if(n->class == CAUTO) {
- a->type = D_AUTO;
- break;
- }
- if(n->class == CPARAM) {
- a->type = D_PARAM;
- break;
- }
- goto bad;
-
- case OCONST:
- if(typefd[n->type->etype]) {
- a->type = D_FCONST;
- a->u.dval = n->fconst;
- break;
- }
- a->sym = nil;
- a->type = D_CONST;
- a->offset = n->vconst;
- break;
-
- case OADDR:
- naddr(n->left, a);
- if(a->type >= D_INDIR) {
- a->type -= D_INDIR;
- break;
- }
- if(a->type == D_EXTERN || a->type == D_STATIC ||
- a->type == D_AUTO || a->type == D_PARAM)
- if(a->index == D_NONE) {
- a->index = a->type;
- a->type = D_ADDR;
- break;
- }
- goto bad;
-
- case OADD:
- if(n->right->op == OCONST) {
- v = n->right->vconst;
- naddr(n->left, a);
- } else
- if(n->left->op == OCONST) {
- v = n->left->vconst;
- naddr(n->right, a);
- } else
- goto bad;
- a->offset += v;
- break;
-
- }
-}
-
-#define CASE(a,b) ((a<<8)|(b<<0))
-
-void
-gmove(Node *f, Node *t)
-{
- int ft, tt, a;
- Node nod, nod1;
- Prog *p1;
-
- ft = f->type->etype;
- tt = t->type->etype;
- if(debug['M'])
- print("gop: %O %O[%s],%O[%s]\n", OAS,
- f->op, tnames[ft], t->op, tnames[tt]);
- if(typefd[ft] && f->op == OCONST) {
- if(f->fconst == 0)
- gins(AFLDZ, Z, Z);
- else
- if(f->fconst == 1)
- gins(AFLD1, Z, Z);
- else
- gins(AFMOVD, f, &fregnode0);
- gmove(&fregnode0, t);
- return;
- }
-/*
- * load
- */
- if(f->op == ONAME || f->op == OINDREG ||
- f->op == OIND || f->op == OINDEX)
- switch(ft) {
- case TCHAR:
- a = AMOVBLSX;
- goto ld;
- case TUCHAR:
- a = AMOVBLZX;
- goto ld;
- case TSHORT:
- if(typefd[tt]) {
- gins(AFMOVW, f, &fregnode0);
- gmove(&fregnode0, t);
- return;
- }
- a = AMOVWLSX;
- goto ld;
- case TUSHORT:
- a = AMOVWLZX;
- goto ld;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- if(typefd[tt]) {
- gins(AFMOVL, f, &fregnode0);
- gmove(&fregnode0, t);
- return;
- }
- a = AMOVL;
-
- ld:
- regalloc(&nod, f, t);
- nod.type = types[TLONG];
- gins(a, f, &nod);
- gmove(&nod, t);
- regfree(&nod);
- return;
-
- case TFLOAT:
- gins(AFMOVF, f, t);
- return;
- case TDOUBLE:
- gins(AFMOVD, f, t);
- return;
- case TVLONG:
- gins(AFMOVV, f, t);
- return;
- }
-
-/*
- * store
- */
- if(t->op == ONAME || t->op == OINDREG ||
- t->op == OIND || t->op == OINDEX)
- switch(tt) {
- case TCHAR:
- case TUCHAR:
- a = AMOVB; goto st;
- case TSHORT:
- case TUSHORT:
- a = AMOVW; goto st;
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- a = AMOVL; goto st;
-
- st:
- if(f->op == OCONST) {
- gins(a, f, t);
- return;
- }
- regalloc(&nod, t, f);
- gmove(f, &nod);
- gins(a, &nod, t);
- regfree(&nod);
- return;
-
- case TFLOAT:
- gins(AFMOVFP, f, t);
- return;
- case TDOUBLE:
- gins(AFMOVDP, f, t);
- return;
- case TVLONG:
- gins(AFMOVVP, f, t);
- return;
- }
-
-/*
- * convert
- */
- switch(CASE(ft,tt)) {
- default:
-/*
- * integer to integer
- ********
- a = AGOK; break;
-
- case CASE( TCHAR, TCHAR):
- case CASE( TUCHAR, TCHAR):
- case CASE( TSHORT, TCHAR):
- case CASE( TUSHORT,TCHAR):
- case CASE( TINT, TCHAR):
- case CASE( TUINT, TCHAR):
- case CASE( TLONG, TCHAR):
- case CASE( TULONG, TCHAR):
- case CASE( TIND, TCHAR):
-
- case CASE( TCHAR, TUCHAR):
- case CASE( TUCHAR, TUCHAR):
- case CASE( TSHORT, TUCHAR):
- case CASE( TUSHORT,TUCHAR):
- case CASE( TINT, TUCHAR):
- case CASE( TUINT, TUCHAR):
- case CASE( TLONG, TUCHAR):
- case CASE( TULONG, TUCHAR):
- case CASE( TIND, TUCHAR):
-
- case CASE( TSHORT, TSHORT):
- case CASE( TUSHORT,TSHORT):
- case CASE( TINT, TSHORT):
- case CASE( TUINT, TSHORT):
- case CASE( TLONG, TSHORT):
- case CASE( TULONG, TSHORT):
- case CASE( TIND, TSHORT):
-
- case CASE( TSHORT, TUSHORT):
- case CASE( TUSHORT,TUSHORT):
- case CASE( TINT, TUSHORT):
- case CASE( TUINT, TUSHORT):
- case CASE( TLONG, TUSHORT):
- case CASE( TULONG, TUSHORT):
- case CASE( TIND, TUSHORT):
-
- case CASE( TINT, TINT):
- case CASE( TUINT, TINT):
- case CASE( TLONG, TINT):
- case CASE( TULONG, TINT):
- case CASE( TIND, TINT):
-
- case CASE( TINT, TUINT):
- case CASE( TUINT, TUINT):
- case CASE( TLONG, TUINT):
- case CASE( TULONG, TUINT):
- case CASE( TIND, TUINT):
-
- case CASE( TINT, TLONG):
- case CASE( TUINT, TLONG):
- case CASE( TLONG, TLONG):
- case CASE( TULONG, TLONG):
- case CASE( TIND, TLONG):
-
- case CASE( TINT, TULONG):
- case CASE( TUINT, TULONG):
- case CASE( TLONG, TULONG):
- case CASE( TULONG, TULONG):
- case CASE( TIND, TULONG):
-
- case CASE( TINT, TIND):
- case CASE( TUINT, TIND):
- case CASE( TLONG, TIND):
- case CASE( TULONG, TIND):
- case CASE( TIND, TIND):
- *****/
- a = AMOVL;
- break;
-
- case CASE( TSHORT, TINT):
- case CASE( TSHORT, TUINT):
- case CASE( TSHORT, TLONG):
- case CASE( TSHORT, TULONG):
- case CASE( TSHORT, TIND):
- a = AMOVWLSX;
- if(f->op == OCONST) {
- f->vconst &= 0xffff;
- if(f->vconst & 0x8000)
- f->vconst |= 0xffff0000;
- a = AMOVL;
- }
- break;
-
- case CASE( TUSHORT,TINT):
- case CASE( TUSHORT,TUINT):
- case CASE( TUSHORT,TLONG):
- case CASE( TUSHORT,TULONG):
- case CASE( TUSHORT,TIND):
- a = AMOVWLZX;
- if(f->op == OCONST) {
- f->vconst &= 0xffff;
- a = AMOVL;
- }
- break;
-
- case CASE( TCHAR, TSHORT):
- case CASE( TCHAR, TUSHORT):
- case CASE( TCHAR, TINT):
- case CASE( TCHAR, TUINT):
- case CASE( TCHAR, TLONG):
- case CASE( TCHAR, TULONG):
- case CASE( TCHAR, TIND):
- a = AMOVBLSX;
- if(f->op == OCONST) {
- f->vconst &= 0xff;
- if(f->vconst & 0x80)
- f->vconst |= 0xffffff00;
- a = AMOVL;
- }
- break;
-
- case CASE( TUCHAR, TSHORT):
- case CASE( TUCHAR, TUSHORT):
- case CASE( TUCHAR, TINT):
- case CASE( TUCHAR, TUINT):
- case CASE( TUCHAR, TLONG):
- case CASE( TUCHAR, TULONG):
- case CASE( TUCHAR, TIND):
- a = AMOVBLZX;
- if(f->op == OCONST) {
- f->vconst &= 0xff;
- a = AMOVL;
- }
- break;
-
-/*
- * float to fix
- */
- case CASE( TFLOAT, TCHAR):
- case CASE( TFLOAT, TUCHAR):
- case CASE( TFLOAT, TSHORT):
- case CASE( TFLOAT, TUSHORT):
- case CASE( TFLOAT, TINT):
- case CASE( TFLOAT, TUINT):
- case CASE( TFLOAT, TLONG):
- case CASE( TFLOAT, TULONG):
- case CASE( TFLOAT, TIND):
-
- case CASE( TDOUBLE,TCHAR):
- case CASE( TDOUBLE,TUCHAR):
- case CASE( TDOUBLE,TSHORT):
- case CASE( TDOUBLE,TUSHORT):
- case CASE( TDOUBLE,TINT):
- case CASE( TDOUBLE,TUINT):
- case CASE( TDOUBLE,TLONG):
- case CASE( TDOUBLE,TULONG):
- case CASE( TDOUBLE,TIND):
-
- case CASE( TVLONG, TCHAR):
- case CASE( TVLONG, TUCHAR):
- case CASE( TVLONG, TSHORT):
- case CASE( TVLONG, TUSHORT):
- case CASE( TVLONG, TINT):
- case CASE( TVLONG, TUINT):
- case CASE( TVLONG, TLONG):
- case CASE( TVLONG, TULONG):
- case CASE( TVLONG, TIND):
- if(fproundflg) {
- regsalloc(&nod, &regnode);
- gins(AFMOVLP, f, &nod);
- gmove(&nod, t);
- return;
- }
- regsalloc(&nod, &regnode);
- regsalloc(&nod1, &regnode);
- gins(AFSTCW, Z, &nod1);
- nod1.xoffset += 2;
- gins(AMOVW, nodconst(0xf7f), &nod1);
- gins(AFLDCW, &nod1, Z);
- gins(AFMOVLP, f, &nod);
- nod1.xoffset -= 2;
- gins(AFLDCW, &nod1, Z);
- gmove(&nod, t);
- return;
-
-/*
- * ulong to float
- */
- case CASE( TULONG, TDOUBLE):
- case CASE( TULONG, TVLONG):
- case CASE( TULONG, TFLOAT):
- case CASE( TUINT, TDOUBLE):
- case CASE( TUINT, TVLONG):
- case CASE( TUINT, TFLOAT):
- regalloc(&nod, f, f);
- gmove(f, &nod);
- regsalloc(&nod1, &regnode);
- gmove(&nod, &nod1);
- gins(AFMOVL, &nod1, &fregnode0);
- gins(ACMPL, &nod, nodconst(0));
- gins(AJGE, Z, Z);
- p1 = p;
- gins(AFADDD, nodfconst(4294967296.), &fregnode0);
- patch(p1, pc);
- regfree(&nod);
- return;
-
-/*
- * fix to float
- */
- case CASE( TCHAR, TFLOAT):
- case CASE( TUCHAR, TFLOAT):
- case CASE( TSHORT, TFLOAT):
- case CASE( TUSHORT,TFLOAT):
- case CASE( TINT, TFLOAT):
- case CASE( TLONG, TFLOAT):
- case CASE( TIND, TFLOAT):
-
- case CASE( TCHAR, TDOUBLE):
- case CASE( TUCHAR, TDOUBLE):
- case CASE( TSHORT, TDOUBLE):
- case CASE( TUSHORT,TDOUBLE):
- case CASE( TINT, TDOUBLE):
- case CASE( TLONG, TDOUBLE):
- case CASE( TIND, TDOUBLE):
-
- case CASE( TCHAR, TVLONG):
- case CASE( TUCHAR, TVLONG):
- case CASE( TSHORT, TVLONG):
- case CASE( TUSHORT,TVLONG):
- case CASE( TINT, TVLONG):
- case CASE( TLONG, TVLONG):
- case CASE( TIND, TVLONG):
- regsalloc(&nod, &regnode);
- gmove(f, &nod);
- gins(AFMOVL, &nod, &fregnode0);
- return;
-
-/*
- * float to float
- */
- case CASE( TFLOAT, TFLOAT):
- case CASE( TDOUBLE,TFLOAT):
- case CASE( TVLONG, TFLOAT):
-
- case CASE( TFLOAT, TDOUBLE):
- case CASE( TDOUBLE,TDOUBLE):
- case CASE( TVLONG, TDOUBLE):
-
- case CASE( TFLOAT, TVLONG):
- case CASE( TDOUBLE,TVLONG):
- case CASE( TVLONG, TVLONG):
- a = AFMOVD; break;
- }
- if(a == AMOVL || a == AFMOVD)
- if(samaddr(f, t))
- return;
- gins(a, f, t);
-}
-
-void
-doindex(Node *n)
-{
- Node nod, nod1;
- int32 v;
-
-if(debug['Y'])
-prtree(n, "index");
-
-if(n->left->complex >= FNX)
-print("botch in doindex\n");
-
- regalloc(&nod, &regnode, Z);
- v = constnode.vconst;
- cgen(n->right, &nod);
- idx.ptr = D_NONE;
- if(n->left->op == OCONST)
- idx.ptr = D_CONST;
- else if(n->left->op == OREGISTER)
- idx.ptr = n->left->reg;
- else if(n->left->op != OADDR) {
- reg[D_BP]++; // can't be used as a base
- regalloc(&nod1, &regnode, Z);
- cgen(n->left, &nod1);
- idx.ptr = nod1.reg;
- regfree(&nod1);
- reg[D_BP]--;
- }
- idx.reg = nod.reg;
- regfree(&nod);
- constnode.vconst = v;
-}
-
-void
-gins(int a, Node *f, Node *t)
-{
-
- if(f != Z && f->op == OINDEX)
- doindex(f);
- if(t != Z && t->op == OINDEX)
- doindex(t);
- nextpc();
- p->as = a;
- if(f != Z)
- naddr(f, &p->from);
- if(t != Z)
- naddr(t, &p->to);
- if(debug['g'])
- print("%P\n", p);
-}
-
-void
-fgopcode(int o, Node *f, Node *t, int pop, int rev)
-{
- int a, et;
- Node nod;
-
- et = TLONG;
- if(f != Z && f->type != T)
- et = f->type->etype;
- if(!typefd[et]) {
- diag(f, "fop: integer %O", o);
- return;
- }
- if(debug['M']) {
- if(t != Z && t->type != T)
- print("gop: %O %O-%s Z\n", o, f->op, tnames[et]);
- else
- print("gop: %O %O-%s %O-%s\n", o,
- f->op, tnames[et], t->op, tnames[t->type->etype]);
- }
- a = AGOK;
- switch(o) {
-
- case OASADD:
- case OADD:
- if(et == TFLOAT)
- a = AFADDF;
- else
- if(et == TDOUBLE || et == TVLONG) {
- a = AFADDD;
- if(pop)
- a = AFADDDP;
- }
- break;
-
- case OASSUB:
- case OSUB:
- if(et == TFLOAT) {
- a = AFSUBF;
- if(rev)
- a = AFSUBRF;
- } else
- if(et == TDOUBLE || et == TVLONG) {
- a = AFSUBD;
- if(pop)
- a = AFSUBDP;
- if(rev) {
- a = AFSUBRD;
- if(pop)
- a = AFSUBRDP;
- }
- }
- break;
-
- case OASMUL:
- case OMUL:
- if(et == TFLOAT)
- a = AFMULF;
- else
- if(et == TDOUBLE || et == TVLONG) {
- a = AFMULD;
- if(pop)
- a = AFMULDP;
- }
- break;
-
- case OASMOD:
- case OMOD:
- case OASDIV:
- case ODIV:
- if(et == TFLOAT) {
- a = AFDIVF;
- if(rev)
- a = AFDIVRF;
- } else
- if(et == TDOUBLE || et == TVLONG) {
- a = AFDIVD;
- if(pop)
- a = AFDIVDP;
- if(rev) {
- a = AFDIVRD;
- if(pop)
- a = AFDIVRDP;
- }
- }
- break;
-
- case OEQ:
- case ONE:
- case OLT:
- case OLE:
- case OGE:
- case OGT:
- pop += rev;
- if(et == TFLOAT) {
- a = AFCOMF;
- if(pop) {
- a = AFCOMFP;
- if(pop > 1)
- a = AGOK;
- }
- } else
- if(et == TDOUBLE || et == TVLONG) {
- a = AFCOMF;
- if(pop) {
- a = AFCOMDP;
- if(pop > 1)
- a = AFCOMDPP;
- }
- }
- gins(a, f, t);
- regalloc(&nod, &regnode, Z);
- if(nod.reg != D_AX) {
- regfree(&nod);
- nod.reg = D_AX;
- gins(APUSHL, &nod, Z);
- gins(AWAIT, Z, Z);
- gins(AFSTSW, Z, &nod);
- gins(ASAHF, Z, Z);
- gins(APOPL, Z, &nod);
- } else {
- gins(AWAIT, Z, Z);
- gins(AFSTSW, Z, &nod);
- gins(ASAHF, Z, Z);
- regfree(&nod);
- }
- switch(o) {
- case OEQ: a = AJEQ; break;
- case ONE: a = AJNE; break;
- case OLT: a = AJCS; break;
- case OLE: a = AJLS; break;
- case OGE: a = AJCC; break;
- case OGT: a = AJHI; break;
- }
- gins(a, Z, Z);
- return;
- }
- if(a == AGOK)
- diag(Z, "bad in gopcode %O", o);
- gins(a, f, t);
-}
-
-void
-gopcode(int o, Type *ty, Node *f, Node *t)
-{
- int a, et;
-
- et = TLONG;
- if(ty != T)
- et = ty->etype;
- if(typefd[et] && o != OADDR && o != OFUNC) {
- diag(f, "gop: float %O", o);
- return;
- }
- if(debug['M']) {
- if(f != Z && f->type != T)
- print("gop: %O %O[%s],", o, f->op, tnames[et]);
- else
- print("gop: %O Z,", o);
- if(t != Z && t->type != T)
- print("%O[%s]\n", t->op, tnames[t->type->etype]);
- else
- print("Z\n");
- }
- a = AGOK;
- switch(o) {
- case OCOM:
- a = ANOTL;
- if(et == TCHAR || et == TUCHAR)
- a = ANOTB;
- if(et == TSHORT || et == TUSHORT)
- a = ANOTW;
- break;
-
- case ONEG:
- a = ANEGL;
- if(et == TCHAR || et == TUCHAR)
- a = ANEGB;
- if(et == TSHORT || et == TUSHORT)
- a = ANEGW;
- break;
-
- case OADDR:
- a = ALEAL;
- break;
-
- case OASADD:
- case OADD:
- a = AADDL;
- if(et == TCHAR || et == TUCHAR)
- a = AADDB;
- if(et == TSHORT || et == TUSHORT)
- a = AADDW;
- break;
-
- case OASSUB:
- case OSUB:
- a = ASUBL;
- if(et == TCHAR || et == TUCHAR)
- a = ASUBB;
- if(et == TSHORT || et == TUSHORT)
- a = ASUBW;
- break;
-
- case OASOR:
- case OOR:
- a = AORL;
- if(et == TCHAR || et == TUCHAR)
- a = AORB;
- if(et == TSHORT || et == TUSHORT)
- a = AORW;
- break;
-
- case OASAND:
- case OAND:
- a = AANDL;
- if(et == TCHAR || et == TUCHAR)
- a = AANDB;
- if(et == TSHORT || et == TUSHORT)
- a = AANDW;
- break;
-
- case OASXOR:
- case OXOR:
- a = AXORL;
- if(et == TCHAR || et == TUCHAR)
- a = AXORB;
- if(et == TSHORT || et == TUSHORT)
- a = AXORW;
- break;
-
- case OASLSHR:
- case OLSHR:
- a = ASHRL;
- if(et == TCHAR || et == TUCHAR)
- a = ASHRB;
- if(et == TSHORT || et == TUSHORT)
- a = ASHRW;
- break;
-
- case OASASHR:
- case OASHR:
- a = ASARL;
- if(et == TCHAR || et == TUCHAR)
- a = ASARB;
- if(et == TSHORT || et == TUSHORT)
- a = ASARW;
- break;
-
- case OASASHL:
- case OASHL:
- a = ASALL;
- if(et == TCHAR || et == TUCHAR)
- a = ASALB;
- if(et == TSHORT || et == TUSHORT)
- a = ASALW;
- break;
-
- case OROTL:
- a = AROLL;
- if(et == TCHAR || et == TUCHAR)
- a = AROLB;
- if(et == TSHORT || et == TUSHORT)
- a = AROLW;
- break;
-
- case OFUNC:
- a = ACALL;
- break;
-
- case OASMUL:
- case OMUL:
- if(f->op == OREGISTER && t != Z && isreg(t, D_AX) && reg[D_DX] == 0)
- t = Z;
- a = AIMULL;
- break;
-
- case OASMOD:
- case OMOD:
- case OASDIV:
- case ODIV:
- a = AIDIVL;
- break;
-
- case OASLMUL:
- case OLMUL:
- a = AMULL;
- break;
-
- case OASLMOD:
- case OLMOD:
- case OASLDIV:
- case OLDIV:
- a = ADIVL;
- break;
-
- case OEQ:
- case ONE:
- case OLT:
- case OLE:
- case OGE:
- case OGT:
- case OLO:
- case OLS:
- case OHS:
- case OHI:
- a = ACMPL;
- if(et == TCHAR || et == TUCHAR)
- a = ACMPB;
- if(et == TSHORT || et == TUSHORT)
- a = ACMPW;
- gins(a, f, t);
- switch(o) {
- case OEQ: a = AJEQ; break;
- case ONE: a = AJNE; break;
- case OLT: a = AJLT; break;
- case OLE: a = AJLE; break;
- case OGE: a = AJGE; break;
- case OGT: a = AJGT; break;
- case OLO: a = AJCS; break;
- case OLS: a = AJLS; break;
- case OHS: a = AJCC; break;
- case OHI: a = AJHI; break;
- }
- gins(a, Z, Z);
- return;
- }
- if(a == AGOK)
- diag(Z, "bad in gopcode %O", o);
- gins(a, f, t);
-}
-
-int
-samaddr(Node *f, Node *t)
-{
-
- if(f->op != t->op)
- return 0;
- switch(f->op) {
-
- case OREGISTER:
- if(f->reg != t->reg)
- break;
- return 1;
- }
- return 0;
-}
-
-void
-gbranch(int o)
-{
- int a;
-
- a = AGOK;
- switch(o) {
- case ORETURN:
- a = ARET;
- break;
- case OGOTO:
- a = AJMP;
- break;
- }
- nextpc();
- if(a == AGOK) {
- diag(Z, "bad in gbranch %O", o);
- nextpc();
- }
- p->as = a;
-}
-
-void
-patch(Prog *op, int32 pc)
-{
- op->to.offset = pc;
- op->to.type = D_BRANCH;
- op->to.u.branch = nil;
- op->pcond = nil;
-}
-
-void
-gpseudo(int a, Sym *s, Node *n)
-{
-
- nextpc();
- p->as = a;
- p->from.type = D_EXTERN;
- p->from.sym = linksym(s);
-
- switch(a) {
- case ATEXT:
- p->from.scale = textflag;
- textflag = 0;
- break;
- case AGLOBL:
- p->from.scale = s->dataflag;
- break;
- }
-
- if(s->class == CSTATIC)
- p->from.type = D_STATIC;
- naddr(n, &p->to);
- if(a == ADATA || a == AGLOBL)
- pc--;
-}
-
-void
-gpcdata(int index, int value)
-{
- Node n1;
-
- n1 = *nodconst(index);
- gins(APCDATA, &n1, nodconst(value));
-}
-
-void
-gprefetch(Node *n)
-{
- Node n1;
-
- if(strcmp(getgo386(), "sse2") != 0) // assume no prefetch on old machines
- return;
-
- regalloc(&n1, n, Z);
- gmove(n, &n1);
- n1.op = OINDREG;
- gins(APREFETCHNTA, &n1, Z);
- regfree(&n1);
-}
-
-int
-sconst(Node *n)
-{
- int32 v;
-
- if(n->op == OCONST && !typefd[n->type->etype]) {
- v = n->vconst;
- if(v >= -32766L && v < 32766L)
- return 1;
- }
- return 0;
-}
-
-int32
-exreg(Type *t)
-{
- int32 o;
-
- if(typechlp[t->etype]){
- if(exregoffset >= 32)
- return 0;
- o = exregoffset;
- exregoffset += 4;
- return o+1; // +1 to avoid 0 == failure; naddr case OEXREG will -1.
- }
-
- return 0;
-}
-
-schar ewidth[NTYPE] =
-{
- -1, /*[TXXX]*/
- SZ_CHAR, /*[TCHAR]*/
- SZ_CHAR, /*[TUCHAR]*/
- SZ_SHORT, /*[TSHORT]*/
- SZ_SHORT, /*[TUSHORT]*/
- SZ_INT, /*[TINT]*/
- SZ_INT, /*[TUINT]*/
- SZ_LONG, /*[TLONG]*/
- SZ_LONG, /*[TULONG]*/
- SZ_VLONG, /*[TVLONG]*/
- SZ_VLONG, /*[TUVLONG]*/
- SZ_FLOAT, /*[TFLOAT]*/
- SZ_DOUBLE, /*[TDOUBLE]*/
- SZ_IND, /*[TIND]*/
- 0, /*[TFUNC]*/
- -1, /*[TARRAY]*/
- 0, /*[TVOID]*/
- -1, /*[TSTRUCT]*/
- -1, /*[TUNION]*/
- SZ_INT, /*[TENUM]*/
-};
-int32 ncast[NTYPE] =
-{
- 0, /*[TXXX]*/
- BCHAR|BUCHAR, /*[TCHAR]*/
- BCHAR|BUCHAR, /*[TUCHAR]*/
- BSHORT|BUSHORT, /*[TSHORT]*/
- BSHORT|BUSHORT, /*[TUSHORT]*/
- BINT|BUINT|BLONG|BULONG|BIND, /*[TINT]*/
- BINT|BUINT|BLONG|BULONG|BIND, /*[TUINT]*/
- BINT|BUINT|BLONG|BULONG|BIND, /*[TLONG]*/
- BINT|BUINT|BLONG|BULONG|BIND, /*[TULONG]*/
- BVLONG|BUVLONG, /*[TVLONG]*/
- BVLONG|BUVLONG, /*[TUVLONG]*/
- BFLOAT, /*[TFLOAT]*/
- BDOUBLE, /*[TDOUBLE]*/
- BLONG|BULONG|BIND, /*[TIND]*/
- 0, /*[TFUNC]*/
- 0, /*[TARRAY]*/
- 0, /*[TVOID]*/
- BSTRUCT, /*[TSTRUCT]*/
- BUNION, /*[TUNION]*/
- 0, /*[TENUM]*/
-};
diff --git a/src/cmd/cc/Makefile b/src/cmd/cc/Makefile
deleted file mode 100644
index 34df31d6f..000000000
--- a/src/cmd/cc/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright 2012 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../Make.dist
-
-install: y.tab.h
-
-y.tab.h: cc.y
- LANG=C LANGUAGE=en_US.UTF8 bison -d -v -y cc.y
diff --git a/src/cmd/cc/acid.c b/src/cmd/cc/acid.c
deleted file mode 100644
index 23147e519..000000000
--- a/src/cmd/cc/acid.c
+++ /dev/null
@@ -1,344 +0,0 @@
-// Inferno utils/cc/acid.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/acid.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-static char *kwd[] =
-{
- "$adt", "$aggr", "$append", "$complex", "$defn",
- "$delete", "$do", "$else", "$eval", "$head", "$if",
- "$local", "$loop", "$return", "$tail", "$then",
- "$union", "$whatis", "$while",
-};
-
-char*
-amap(char *s)
-{
- int i, bot, top, new;
-
- bot = 0;
- top = bot + nelem(kwd) - 1;
- while(bot <= top){
- new = bot + (top - bot)/2;
- i = strcmp(kwd[new]+1, s);
- if(i == 0)
- return kwd[new];
-
- if(i < 0)
- bot = new + 1;
- else
- top = new - 1;
- }
- return s;
-}
-
-Sym*
-acidsue(Type *t)
-{
- int h;
- Sym *s;
-
- if(t != T)
- for(h=0; h<nelem(hash); h++)
- for(s = hash[h]; s != S; s = s->link)
- if(s->suetag && s->suetag->link == t)
- return s;
- return 0;
-}
-
-Sym*
-acidfun(Type *t)
-{
- int h;
- Sym *s;
-
- for(h=0; h<nelem(hash); h++)
- for(s = hash[h]; s != S; s = s->link)
- if(s->type == t)
- return s;
- return 0;
-}
-
-char acidchar[NTYPE];
-Init acidcinit[] =
-{
- TCHAR, 'C', 0,
- TUCHAR, 'b', 0,
- TSHORT, 'd', 0,
- TUSHORT, 'u', 0,
- TLONG, 'D', 0,
- TULONG, 'U', 0,
- TVLONG, 'V', 0,
- TUVLONG, 'W', 0,
- TFLOAT, 'f', 0,
- TDOUBLE, 'F', 0,
- TARRAY, 'a', 0,
- TIND, 'X', 0,
- -1, 0, 0,
-};
-
-static void
-acidinit(void)
-{
- Init *p;
-
- for(p=acidcinit; p->code >= 0; p++)
- acidchar[p->code] = p->value;
-
- acidchar[TINT] = acidchar[TLONG];
- acidchar[TUINT] = acidchar[TULONG];
- if(types[TINT]->width != types[TLONG]->width) {
- acidchar[TINT] = acidchar[TSHORT];
- acidchar[TUINT] = acidchar[TUSHORT];
- if(types[TINT]->width != types[TSHORT]->width)
- warn(Z, "acidmember int not long or short");
- }
- if(types[TIND]->width == types[TUVLONG]->width)
- acidchar[TIND] = 'Y';
-
-}
-
-void
-acidmember(Type *t, int32 off, int flag)
-{
- Sym *s, *s1;
- Type *l;
- static int acidcharinit = 0;
-
- if(acidcharinit == 0) {
- acidinit();
- acidcharinit = 1;
- }
- s = t->sym;
- switch(t->etype) {
- default:
- Bprint(&outbuf, " T%d\n", t->etype);
- break;
-
- case TIND:
- if(s == S)
- break;
- l = t->link;
- if(flag) {
- if(typesu[l->etype]) {
- s1 = acidsue(l->link);
- if(s1 != S) {
- Bprint(&outbuf, " 'A' %s %d %s;\n",
- amap(s1->name),
- t->offset+off, amap(s->name));
- break;
- }
- }
- } else {
- l = t->link;
- s1 = S;
- if(typesu[l->etype])
- s1 = acidsue(l->link);
- if(s1 != S) {
- Bprint(&outbuf,
- "\tprint(indent, \"%s\t(%s)\", addr.%s\\X, \"\\n\");\n",
- amap(s->name), amap(s1->name), amap(s->name));
- } else {
- Bprint(&outbuf,
- "\tprint(indent, \"%s\t\", addr.%s\\X, \"\\n\");\n",
- amap(s->name), amap(s->name));
- }
- break;
- }
-
- case TINT:
- case TUINT:
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TLONG:
- case TULONG:
- case TVLONG:
- case TUVLONG:
- case TFLOAT:
- case TDOUBLE:
- case TARRAY:
- if(s == S)
- break;
- if(flag) {
- Bprint(&outbuf, " '%c' %d %s;\n",
- acidchar[t->etype], t->offset+off, amap(s->name));
- } else {
- Bprint(&outbuf, "\tprint(indent, \"%s\t\", addr.%s, \"\\n\");\n",
- amap(s->name), amap(s->name));
- }
- break;
-
- case TSTRUCT:
- case TUNION:
- s1 = acidsue(t->link);
- if(s1 == S)
- break;
- if(flag) {
- if(s == S) {
- Bprint(&outbuf, " {\n");
- for(l = t->link; l != T; l = l->down)
- acidmember(l, t->offset+off, flag);
- Bprint(&outbuf, " };\n");
- } else {
- Bprint(&outbuf, " %s %d %s;\n",
- amap(s1->name),
- t->offset+off, amap(s->name));
- }
- } else {
- if(s != S) {
- Bprint(&outbuf, "\tprint(indent, \"%s %s {\\n\");\n",
- amap(s1->name), amap(s->name));
- Bprint(&outbuf, "\tindent_%s(addr.%s, indent+\"\\t\");\n",
- amap(s1->name), amap(s->name));
- Bprint(&outbuf, "\tprint(indent, \"}\\n\");\n");
- } else {
- Bprint(&outbuf, "\tprint(indent, \"%s {\\n\");\n",
- amap(s1->name));
- Bprint(&outbuf, "\tindent_%s(addr+%d, indent+\"\\t\");\n",
- amap(s1->name), t->offset+off);
- Bprint(&outbuf, "\tprint(indent, \"}\\n\");\n");
- }
- }
- break;
- }
-}
-
-void
-acidtype(Type *t)
-{
- Sym *s;
- Type *l;
- Io *i;
- int n;
- char *an;
-
- if(!debug['a'])
- return;
- if(debug['a'] > 1) {
- n = 0;
- for(i=iostack; i; i=i->link)
- n++;
- if(n > 1)
- return;
- }
- s = acidsue(t->link);
- if(s == S)
- return;
- switch(t->etype) {
- default:
- Bprint(&outbuf, "T%d\n", t->etype);
- return;
-
- case TUNION:
- case TSTRUCT:
- if(debug['s'])
- goto asmstr;
- an = amap(s->name);
- Bprint(&outbuf, "sizeof%s = %d;\n", an, t->width);
- Bprint(&outbuf, "aggr %s\n{\n", an);
- for(l = t->link; l != T; l = l->down)
- acidmember(l, 0, 1);
- Bprint(&outbuf, "};\n\n");
-
- Bprint(&outbuf, "defn\n%s(addr) {\n\tindent_%s(addr, \"\\t\");\n}\n", an, an);
- Bprint(&outbuf, "defn\nindent_%s(addr, indent) {\n\tcomplex %s addr;\n", an, an);
- for(l = t->link; l != T; l = l->down)
- acidmember(l, 0, 0);
- Bprint(&outbuf, "};\n\n");
- break;
- asmstr:
- if(s == S)
- break;
- for(l = t->link; l != T; l = l->down)
- if(l->sym != S)
- Bprint(&outbuf, "#define\t%s.%s\t%d\n",
- s->name,
- l->sym->name,
- l->offset);
- break;
- }
-}
-
-void
-acidvar(Sym *s)
-{
- int n;
- Io *i;
- Type *t;
- Sym *s1, *s2;
-
- if(!debug['a'] || debug['s'])
- return;
- if(debug['a'] > 1) {
- n = 0;
- for(i=iostack; i; i=i->link)
- n++;
- if(n > 1)
- return;
- }
- t = s->type;
- while(t && t->etype == TIND)
- t = t->link;
- if(t == T)
- return;
- if(t->etype == TENUM) {
- Bprint(&outbuf, "%s = ", amap(s->name));
- if(!typefd[t->etype])
- Bprint(&outbuf, "%lld;\n", s->vconst);
- else
- Bprint(&outbuf, "%f\n;", s->fconst);
- return;
- }
- if(!typesu[t->etype])
- return;
- s1 = acidsue(t->link);
- if(s1 == S)
- return;
- switch(s->class) {
- case CAUTO:
- case CPARAM:
- s2 = acidfun(thisfn);
- if(s2)
- Bprint(&outbuf, "complex %s %s:%s;\n",
- amap(s1->name), amap(s2->name), amap(s->name));
- break;
-
- case CSTATIC:
- case CEXTERN:
- case CGLOBL:
- case CLOCAL:
- Bprint(&outbuf, "complex %s %s;\n",
- amap(s1->name), amap(s->name));
- break;
- }
-}
diff --git a/src/cmd/cc/bits.c b/src/cmd/cc/bits.c
deleted file mode 100644
index 4496d65e7..000000000
--- a/src/cmd/cc/bits.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// Inferno utils/cc/bits.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/bits.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-Bits
-bor(Bits a, Bits b)
-{
- Bits c;
- int i;
-
- for(i=0; i<BITS; i++)
- c.b[i] = a.b[i] | b.b[i];
- return c;
-}
-
-Bits
-band(Bits a, Bits b)
-{
- Bits c;
- int i;
-
- for(i=0; i<BITS; i++)
- c.b[i] = a.b[i] & b.b[i];
- return c;
-}
-
-/*
-Bits
-bnot(Bits a)
-{
- Bits c;
- int i;
-
- for(i=0; i<BITS; i++)
- c.b[i] = ~a.b[i];
- return c;
-}
-*/
-
-int
-bany(Bits *a)
-{
- int i;
-
- for(i=0; i<BITS; i++)
- if(a->b[i])
- return 1;
- return 0;
-}
-
-int
-beq(Bits a, Bits b)
-{
- int i;
-
- for(i=0; i<BITS; i++)
- if(a.b[i] != b.b[i])
- return 0;
- return 1;
-}
-
-int
-bnum(Bits a)
-{
- int i;
- int32 b;
-
- for(i=0; i<BITS; i++)
- if(b = a.b[i])
- return 32*i + bitno(b);
- diag(Z, "bad in bnum");
- return 0;
-}
-
-Bits
-blsh(uint n)
-{
- Bits c;
-
- c = zbits;
- c.b[n/32] = 1L << (n%32);
- return c;
-}
-
-int
-bset(Bits a, uint n)
-{
- if(a.b[n/32] & (1L << (n%32)))
- return 1;
- return 0;
-}
diff --git a/src/cmd/cc/cc.h b/src/cmd/cc/cc.h
deleted file mode 100644
index 9530f5cf6..000000000
--- a/src/cmd/cc/cc.h
+++ /dev/null
@@ -1,835 +0,0 @@
-// Inferno utils/cc/cc.h
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/cc.h
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <libc.h>
-#include <bio.h>
-#include <link.h>
-
-#ifndef EXTERN
-#define EXTERN extern
-#endif
-
-#undef getc
-#undef ungetc
-#undef BUFSIZ
-
-#define getc ccgetc
-#define ungetc ccungetc
-
-typedef struct Node Node;
-typedef struct Sym Sym;
-typedef struct Type Type;
-typedef struct Funct Funct;
-typedef struct Decl Decl;
-typedef struct Io Io;
-typedef struct Term Term;
-typedef struct Init Init;
-typedef struct Bits Bits;
-typedef struct Bvec Bvec;
-typedef struct Dynimp Dynimp;
-typedef struct Dynexp Dynexp;
-typedef struct Var Var;
-
-typedef Rune TRune; /* target system type */
-
-#define BUFSIZ 8192
-#define NSYMB 500
-#define NHASH 1024
-#define STRINGSZ 200
-#define HISTSZ 20
-#define YYMAXDEPTH 500
-#define NTERM 10
-#define MAXALIGN 7
-
-#define SIGN(n) ((uvlong)1<<(n-1))
-#define MASK(n) (SIGN(n)|(SIGN(n)-1))
-
-#define BITS 5
-#define NVAR (BITS*sizeof(uint32)*8)
-struct Bits
-{
- uint32 b[BITS];
-};
-
-struct Bvec
-{
- int32 n; // number of bits
- uint32 b[];
-};
-
-struct Var
-{
- vlong offset;
- LSym* sym;
- char name;
- char etype;
-};
-
-struct Node
-{
- Node* left;
- Node* right;
- void* label;
- int32 pc;
- int reg;
- int32 xoffset;
- double fconst; /* fp constant */
- vlong vconst; /* non fp const */
- char* cstring; /* character string */
- TRune* rstring; /* rune string */
-
- Sym* sym;
- Type* type;
- int32 lineno;
- uchar op;
- uchar oldop;
- uchar xcast;
- uchar class;
- uchar etype;
- uchar complex;
- uchar addable;
- uchar scale;
- uchar garb;
-};
-#define Z ((Node*)0)
-
-struct Sym
-{
- Sym* link;
- LSym* lsym;
- Type* type;
- Type* suetag;
- Type* tenum;
- char* macro;
- int32 varlineno;
- int32 offset;
- vlong vconst;
- double fconst;
- Node* label;
- ushort lexical;
- char *name;
- ushort block;
- ushort sueblock;
- uchar class;
- uchar sym;
- uchar aused;
- uchar sig;
- uchar dataflag;
-};
-#define S ((Sym*)0)
-
-enum{
- SIGNONE = 0,
- SIGDONE = 1,
- SIGINTERN = 2,
-
- SIGNINTERN = 1729*325*1729,
-};
-
-struct Decl
-{
- Decl* link;
- Sym* sym;
- Type* type;
- int32 varlineno;
- int32 offset;
- short val;
- ushort block;
- uchar class;
- uchar aused;
-};
-#define D ((Decl*)0)
-
-struct Type
-{
- Sym* sym;
- Sym* tag;
- Funct* funct;
- Type* link;
- Type* down;
- int32 width;
- int32 offset;
- int32 lineno;
- uchar shift;
- uchar nbits;
- uchar etype;
- uchar garb;
- uchar align;
-};
-
-#define T ((Type*)0)
-#define NODECL ((void(*)(int, Type*, Sym*))0)
-
-struct Init /* general purpose initialization */
-{
- int code;
- uint32 value;
- char* s;
-};
-
-EXTERN struct
-{
- char* p;
- int c;
-} fi;
-
-struct Io
-{
- Io* link;
- char* p;
- char b[BUFSIZ];
- short c;
- short f;
-};
-#define I ((Io*)0)
-
-struct Term
-{
- vlong mult;
- Node *node;
-};
-
-enum
-{
- Axxx,
- Ael1,
- Ael2,
- Asu2,
- Aarg0,
- Aarg1,
- Aarg2,
- Aaut3,
- NALIGN,
-};
-
-enum
-{
- DMARK,
- DAUTO,
- DSUE,
- DLABEL,
-};
-enum
-{
- OXXX,
- OADD,
- OADDR,
- OAND,
- OANDAND,
- OARRAY,
- OAS,
- OASI,
- OASADD,
- OASAND,
- OASASHL,
- OASASHR,
- OASDIV,
- OASHL,
- OASHR,
- OASLDIV,
- OASLMOD,
- OASLMUL,
- OASLSHR,
- OASMOD,
- OASMUL,
- OASOR,
- OASSUB,
- OASXOR,
- OBIT,
- OBREAK,
- OCASE,
- OCAST,
- OCOMMA,
- OCOND,
- OCONST,
- OCONTINUE,
- ODIV,
- ODOT,
- ODOTDOT,
- ODWHILE,
- OENUM,
- OEQ,
- OEXREG,
- OFOR,
- OFUNC,
- OGE,
- OGOTO,
- OGT,
- OHI,
- OHS,
- OIF,
- OIND,
- OINDREG,
- OINIT,
- OLABEL,
- OLDIV,
- OLE,
- OLIST,
- OLMOD,
- OLMUL,
- OLO,
- OLS,
- OLSHR,
- OLT,
- OMOD,
- OMUL,
- ONAME,
- ONE,
- ONOT,
- OOR,
- OOROR,
- OPOSTDEC,
- OPOSTINC,
- OPREDEC,
- OPREINC,
- OPREFETCH,
- OPROTO,
- OREGISTER,
- ORETURN,
- OSET,
- OSIGN,
- OSIZE,
- OSTRING,
- OLSTRING,
- OSTRUCT,
- OSUB,
- OSWITCH,
- OUNION,
- OUSED,
- OWHILE,
- OXOR,
- ONEG,
- OCOM,
- OPOS,
- OELEM,
-
- OTST, /* used in some compilers */
- OINDEX,
- OFAS,
- OREGPAIR,
- OROTL,
-
- OEND
-};
-enum
-{
- TXXX,
- TCHAR,
- TUCHAR,
- TSHORT,
- TUSHORT,
- TINT,
- TUINT,
- TLONG,
- TULONG,
- TVLONG,
- TUVLONG,
- TFLOAT,
- TDOUBLE,
- TIND,
- TFUNC,
- TARRAY,
- TVOID,
- TSTRUCT,
- TUNION,
- TENUM,
- NTYPE,
-
- TAUTO = NTYPE,
- TEXTERN,
- TSTATIC,
- TTYPEDEF,
- TTYPESTR,
- TREGISTER,
- TCONSTNT,
- TVOLATILE,
- TUNSIGNED,
- TSIGNED,
- TDOT,
- TFILE,
- TOLD,
- NALLTYPES,
-
- /* adapt size of Rune to target system's size */
- TRUNE = sizeof(TRune)==4? TUINT: TUSHORT,
-};
-enum
-{
- CXXX,
- CAUTO,
- CEXTERN,
- CGLOBL,
- CSTATIC,
- CLOCAL,
- CTYPEDEF,
- CTYPESTR,
- CPARAM,
- CSELEM,
- CLABEL,
- CEXREG,
- NCTYPES,
-};
-enum
-{
- GXXX = 0,
- GCONSTNT = 1<<0,
- GVOLATILE = 1<<1,
- NGTYPES = 1<<2,
-
- GINCOMPLETE = 1<<2,
-};
-enum
-{
- BCHAR = 1L<<TCHAR,
- BUCHAR = 1L<<TUCHAR,
- BSHORT = 1L<<TSHORT,
- BUSHORT = 1L<<TUSHORT,
- BINT = 1L<<TINT,
- BUINT = 1L<<TUINT,
- BLONG = 1L<<TLONG,
- BULONG = 1L<<TULONG,
- BVLONG = 1L<<TVLONG,
- BUVLONG = 1L<<TUVLONG,
- BFLOAT = 1L<<TFLOAT,
- BDOUBLE = 1L<<TDOUBLE,
- BIND = 1L<<TIND,
- BFUNC = 1L<<TFUNC,
- BARRAY = 1L<<TARRAY,
- BVOID = 1L<<TVOID,
- BSTRUCT = 1L<<TSTRUCT,
- BUNION = 1L<<TUNION,
- BENUM = 1L<<TENUM,
- BFILE = 1L<<TFILE,
- BDOT = 1L<<TDOT,
- BCONSTNT = 1L<<TCONSTNT,
- BVOLATILE = 1L<<TVOLATILE,
- BUNSIGNED = 1L<<TUNSIGNED,
- BSIGNED = 1L<<TSIGNED,
- BAUTO = 1L<<TAUTO,
- BEXTERN = 1L<<TEXTERN,
- BSTATIC = 1L<<TSTATIC,
- BTYPEDEF = 1L<<TTYPEDEF,
- BTYPESTR = 1L<<TTYPESTR,
- BREGISTER = 1L<<TREGISTER,
-
- BINTEGER = BCHAR|BUCHAR|BSHORT|BUSHORT|BINT|BUINT|
- BLONG|BULONG|BVLONG|BUVLONG,
- BNUMBER = BINTEGER|BFLOAT|BDOUBLE,
-
-/* these can be overloaded with complex types */
-
- BCLASS = BAUTO|BEXTERN|BSTATIC|BTYPEDEF|BTYPESTR|BREGISTER,
- BGARB = BCONSTNT|BVOLATILE,
-};
-
-struct Funct
-{
- Sym* sym[OEND];
- Sym* castto[NTYPE];
- Sym* castfr[NTYPE];
-};
-
-EXTERN struct
-{
- Type* tenum; /* type of entire enum */
- Type* cenum; /* type of current enum run */
- vlong lastenum; /* value of current enum */
- double floatenum; /* value of current enum */
-} en;
-
-EXTERN int autobn;
-EXTERN int32 autoffset;
-EXTERN int blockno;
-EXTERN Decl* dclstack;
-EXTERN int debug[256];
-EXTERN int32 firstbit;
-EXTERN Sym* firstarg;
-EXTERN Type* firstargtype;
-EXTERN Decl* firstdcl;
-EXTERN int fperror;
-EXTERN Sym* hash[NHASH];
-EXTERN char* hunk;
-EXTERN char** include;
-EXTERN Io* iofree;
-EXTERN Io* ionext;
-EXTERN Io* iostack;
-EXTERN int32 lastbit;
-EXTERN char lastclass;
-EXTERN Type* lastdcl;
-EXTERN int32 lastfield;
-EXTERN Type* lasttype;
-EXTERN int32 lineno;
-EXTERN int32 nearln;
-EXTERN int nerrors;
-EXTERN int newflag;
-EXTERN int32 nhunk;
-EXTERN int ninclude;
-EXTERN Node* nodproto;
-EXTERN Node* nodcast;
-EXTERN int32 nsymb;
-EXTERN Biobuf outbuf;
-EXTERN Biobuf diagbuf;
-EXTERN char* outfile;
-EXTERN int peekc;
-EXTERN int32 stkoff;
-EXTERN Type* strf;
-EXTERN Type* strl;
-EXTERN char* symb;
-EXTERN Sym* symstring;
-EXTERN int taggen;
-EXTERN Type* tfield;
-EXTERN Type* tufield;
-extern int thechar;
-extern char* thestring;
-extern LinkArch* thelinkarch;
-EXTERN Type* thisfn;
-EXTERN int32 thunk;
-EXTERN Type* types[NALLTYPES];
-EXTERN Type* fntypes[NALLTYPES];
-EXTERN Node* initlist;
-EXTERN Term term[NTERM];
-EXTERN int nterm;
-EXTERN int packflg;
-EXTERN int fproundflg;
-EXTERN int textflag;
-EXTERN int dataflag;
-EXTERN int flag_largemodel;
-EXTERN int ncontin;
-EXTERN int canreach;
-EXTERN int warnreach;
-EXTERN int nacl;
-EXTERN Bits zbits;
-EXTERN Fmt pragcgobuf;
-EXTERN Biobuf bstdout;
-EXTERN Var var[NVAR];
-
-extern char *onames[], *tnames[], *gnames[];
-extern char *cnames[], *qnames[], *bnames[];
-extern uchar tab[NTYPE][NTYPE];
-extern uchar comrel[], invrel[], logrel[];
-extern int32 ncast[], tadd[], tand[];
-extern int32 targ[], tasadd[], tasign[], tcast[];
-extern int32 tdot[], tfunct[], tindir[], tmul[];
-extern int32 tnot[], trel[], tsub[];
-
-extern uchar typeaf[];
-extern uchar typefd[];
-extern uchar typei[];
-extern uchar typesu[];
-extern uchar typesuv[];
-extern uchar typeu[];
-extern uchar typev[];
-extern uchar typec[];
-extern uchar typeh[];
-extern uchar typeil[];
-extern uchar typeilp[];
-extern uchar typechl[];
-extern uchar typechlv[];
-extern uchar typechlvp[];
-extern uchar typechlp[];
-extern uchar typechlpfd[];
-
-EXTERN uchar* typeword;
-EXTERN uchar* typecmplx;
-EXTERN Link* ctxt;
-
-extern uint32 thash1;
-extern uint32 thash2;
-extern uint32 thash3;
-extern uint32 thash[];
-
-/*
- * compat.c/unix.c/windows.c
- */
-int systemtype(int);
-int pathchar(void);
-
-/*
- * parser
- */
-int yyparse(void);
-int mpatov(char*, vlong*);
-
-/*
- * lex.c
- */
-void* allocn(void*, int32, int32);
-void* alloc(int32);
-void ensuresymb(int32);
-void cinit(void);
-int compile(char*, char**, int);
-void errorexit(void);
-int filbuf(void);
-int getc(void);
-int32 getr(void);
-int getnsc(void);
-Sym* lookup(void);
-void main(int, char*[]);
-void newfile(char*, int);
-void newio(void);
-void pushio(void);
-int32 escchar(int32, int, int);
-Sym* slookup(char*);
-void syminit(Sym*);
-void unget(int);
-int32 yylex(void);
-int Lconv(Fmt*);
-int Tconv(Fmt*);
-int FNconv(Fmt*);
-int Oconv(Fmt*);
-int Qconv(Fmt*);
-int VBconv(Fmt*);
-int Bconv(Fmt*);
-void setinclude(char*);
-
-/*
- * mac.c
- */
-void dodefine(char*);
-void domacro(void);
-Sym* getsym(void);
-int32 getnsn(void);
-void macdef(void);
-void macprag(void);
-void macend(void);
-void macexpand(Sym*, char*);
-void macif(int);
-void macinc(void);
-void maclin(void);
-void macund(void);
-
-/*
- * dcl.c
- */
-Node* doinit(Sym*, Type*, int32, Node*);
-Type* tcopy(Type*);
-Node* init1(Sym*, Type*, int32, int);
-Node* newlist(Node*, Node*);
-void adecl(int, Type*, Sym*);
-int anyproto(Node*);
-void argmark(Node*, int);
-void dbgdecl(Sym*);
-Node* dcllabel(Sym*, int);
-Node* dodecl(void(*)(int, Type*, Sym*), int, Type*, Node*);
-Sym* mkstatic(Sym*);
-void doenum(Sym*, Node*);
-void snap(Type*);
-Type* dotag(Sym*, int, int);
-void edecl(int, Type*, Sym*);
-Type* fnproto(Node*);
-Type* fnproto1(Node*);
-void markdcl(void);
-Type* paramconv(Type*, int);
-void pdecl(int, Type*, Sym*);
-Decl* push(void);
-Decl* push1(Sym*);
-Node* revertdcl(void);
-int32 xround(int32, int);
-int rsametype(Type*, Type*, int, int);
-int sametype(Type*, Type*);
-uint32 sign(Sym*);
-uint32 signature(Type*);
-void sualign(Type*);
-void tmerge(Type*, Sym*);
-void walkparam(Node*, int);
-void xdecl(int, Type*, Sym*);
-Node* contig(Sym*, Node*, int32);
-
-/*
- * com.c
- */
-void ccom(Node*);
-void complex(Node*);
-int tcom(Node*);
-int tcoma(Node*, Node*, Type*, int);
-int tcomd(Node*);
-int tcomo(Node*, int);
-int tcomx(Node*);
-int tlvalue(Node*);
-void constas(Node*, Type*, Type*);
-
-/*
- * con.c
- */
-void acom(Node*);
-void acom1(vlong, Node*);
-void acom2(Node*, Type*);
-int acomcmp1(const void*, const void*);
-int acomcmp2(const void*, const void*);
-int addo(Node*);
-void evconst(Node*);
-
-/*
- * funct.c
- */
-int isfunct(Node*);
-void dclfunct(Type*, Sym*);
-
-/*
- * sub.c
- */
-void arith(Node*, int);
-int deadheads(Node*);
-Type* dotsearch(Sym*, Type*, Node*, int32*);
-int32 dotoffset(Type*, Type*, Node*);
-void gethunk(void);
-Node* invert(Node*);
-int bitno(int32);
-void makedot(Node*, Type*, int32);
-int mixedasop(Type*, Type*);
-Node* new(int, Node*, Node*);
-Node* new1(int, Node*, Node*);
-int nilcast(Type*, Type*);
-int nocast(Type*, Type*);
-void prtree(Node*, char*);
-void prtree1(Node*, int, int);
-void relcon(Node*, Node*);
-int relindex(int);
-int simpleg(int32);
-Type* garbt(Type*, int32);
-int simplec(int32);
-Type* simplet(int32);
-int stcompat(Node*, Type*, Type*, int32[]);
-int tcompat(Node*, Type*, Type*, int32[]);
-void tinit(void);
-Type* typ(int, Type*);
-Type* copytyp(Type*);
-void typeext(Type*, Node*);
-void typeext1(Type*, Node*);
-int side(Node*);
-int vconst(Node*);
-int xlog2(uvlong);
-int vlog(Node*);
-int topbit(uint32);
-void simplifyshift(Node*);
-int32 typebitor(int32, int32);
-void diag(Node*, char*, ...);
-void warn(Node*, char*, ...);
-void yyerror(char*, ...);
-void fatal(Node*, char*, ...);
-LSym* linksym(Sym*);
-
-/*
- * acid.c
- */
-void acidtype(Type*);
-void acidvar(Sym*);
-
-/*
- * godefs.c
- */
-int Uconv(Fmt*);
-void godeftype(Type*);
-void godefvar(Sym*);
-
-/*
- * bits.c
- */
-Bits bor(Bits, Bits);
-Bits band(Bits, Bits);
-Bits bnot(Bits);
-int bany(Bits*);
-int bnum(Bits);
-Bits blsh(uint);
-int beq(Bits, Bits);
-int bset(Bits, uint);
-
-/*
- * dpchk.c
- */
-void dpcheck(Node*);
-void arginit(void);
-void pragvararg(void);
-void pragpack(void);
-void pragfpround(void);
-void pragdataflag(void);
-void pragtextflag(void);
-void pragincomplete(void);
-void pragcgo(char*);
-
-/*
- * calls to the machine-dependent part
- */
-void codgen(Node*, Node*);
-void gclean(void);
-void gextern(Sym*, Node*, int32, int32);
-void ginit(void);
-int32 outstring(char*, int32);
-int32 outlstring(TRune*, int32);
-void sextern(Sym*, Node*, int32, int32);
-void xcom(Node*);
-int32 exreg(Type*);
-int32 align(int32, Type*, int, int32*);
-int32 maxround(int32, int32);
-int hasdotdotdot(Type*);
-void linkarchinit(void);
-
-extern schar ewidth[];
-
-/*
- * com64
- */
-int com64(Node*);
-void com64init(void);
-void bool64(Node*);
-double convvtof(vlong);
-vlong convftov(double);
-double convftox(double, int);
-vlong convvtox(vlong, int);
-
-/*
- * machcap
- */
-int machcap(Node*);
-
-#pragma varargck argpos warn 2
-#pragma varargck argpos diag 2
-#pragma varargck argpos yyerror 1
-
-#pragma varargck type "B" Bits
-#pragma varargck type "F" Node*
-#pragma varargck type "L" int32
-#pragma varargck type "Q" int32
-#pragma varargck type "O" int
-#pragma varargck type "O" uint
-#pragma varargck type "T" Type*
-#pragma varargck type "U" char*
-#pragma varargck type "|" int
-
-enum
-{
- Plan9 = 1<<0,
- Unix = 1<<1,
- Windows = 1<<2,
-};
-int pathchar(void);
-int systemtype(int);
-void* alloc(int32 n);
-void* allocn(void*, int32, int32);
diff --git a/src/cmd/cc/cc.y b/src/cmd/cc/cc.y
deleted file mode 100644
index 8d7cb1472..000000000
--- a/src/cmd/cc/cc.y
+++ /dev/null
@@ -1,1220 +0,0 @@
-// Inferno utils/cc/cc.y
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/cc.y
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-%{
-#include <u.h>
-#include <stdio.h> /* if we don't, bison will, and cc.h re-#defines getc */
-#include "cc.h"
-%}
-%union {
- Node* node;
- Sym* sym;
- Type* type;
- struct
- {
- Type* t;
- uchar c;
- } tycl;
- struct
- {
- Type* t1;
- Type* t2;
- Type* t3;
- uchar c;
- } tyty;
- struct
- {
- char* s;
- int32 l;
- } sval;
- int32 lval;
- double dval;
- vlong vval;
-}
-%type <sym> ltag
-%type <lval> gctname gcname cname gname tname
-%type <lval> gctnlist gcnlist zgnlist
-%type <type> tlist sbody complex
-%type <tycl> types
-%type <node> zarglist arglist zcexpr
-%type <node> name block stmnt cexpr expr xuexpr pexpr
-%type <node> zelist elist adecl slist uexpr string lstring
-%type <node> xdecor xdecor2 labels label ulstmnt
-%type <node> adlist edecor tag qual qlist
-%type <node> abdecor abdecor1 abdecor2 abdecor3
-%type <node> zexpr lexpr init ilist forexpr
-
-%left ';'
-%left ','
-%right '=' LPE LME LMLE LDVE LMDE LRSHE LLSHE LANDE LXORE LORE
-%right '?' ':'
-%left LOROR
-%left LANDAND
-%left '|'
-%left '^'
-%left '&'
-%left LEQ LNE
-%left '<' '>' LLE LGE
-%left LLSH LRSH
-%left '+' '-'
-%left '*' '/' '%'
-%right LMM LPP LMG '.' '[' '('
-
-%token <sym> LNAME LTYPE
-%token <dval> LFCONST LDCONST
-%token <vval> LCONST LLCONST LUCONST LULCONST LVLCONST LUVLCONST
-%token <sval> LSTRING LLSTRING
-%token LAUTO LBREAK LCASE LCHAR LCONTINUE LDEFAULT LDO
-%token LDOUBLE LELSE LEXTERN LFLOAT LFOR LGOTO
-%token LIF LINT LLONG LPREFETCH LREGISTER LRETURN LSHORT LSIZEOF LUSED
-%token LSTATIC LSTRUCT LSWITCH LTYPEDEF LTYPESTR LUNION LUNSIGNED
-%token LWHILE LVOID LENUM LSIGNED LCONSTNT LVOLATILE LSET LSIGNOF
-%token LRESTRICT LINLINE
-%%
-prog:
-| prog xdecl
-
-/*
- * external declarator
- */
-xdecl:
- zctlist ';'
- {
- dodecl(xdecl, lastclass, lasttype, Z);
- }
-| zctlist xdlist ';'
-| zctlist xdecor
- {
- lastdcl = T;
- firstarg = S;
- dodecl(xdecl, lastclass, lasttype, $2);
- if(lastdcl == T || lastdcl->etype != TFUNC) {
- diag($2, "not a function");
- lastdcl = types[TFUNC];
- }
- thisfn = lastdcl;
- markdcl();
- firstdcl = dclstack;
- argmark($2, 0);
- }
- pdecl
- {
- argmark($2, 1);
- }
- block
- {
- Node *n;
-
- n = revertdcl();
- if(n)
- $6 = new(OLIST, n, $6);
- if(!debug['a'] && !debug['Z'])
- codgen($6, $2);
- }
-
-xdlist:
- xdecor
- {
- dodecl(xdecl, lastclass, lasttype, $1);
- }
-| xdecor
- {
- $1 = dodecl(xdecl, lastclass, lasttype, $1);
- }
- '=' init
- {
- doinit($1->sym, $1->type, 0L, $4);
- }
-| xdlist ',' xdlist
-
-xdecor:
- xdecor2
-| '*' zgnlist xdecor
- {
- $$ = new(OIND, $3, Z);
- $$->garb = simpleg($2);
- }
-
-xdecor2:
- tag
-| '(' xdecor ')'
- {
- $$ = $2;
- }
-| xdecor2 '(' zarglist ')'
- {
- $$ = new(OFUNC, $1, $3);
- }
-| xdecor2 '[' zexpr ']'
- {
- $$ = new(OARRAY, $1, $3);
- }
-
-/*
- * automatic declarator
- */
-adecl:
- ctlist ';'
- {
- $$ = dodecl(adecl, lastclass, lasttype, Z);
- }
-| ctlist adlist ';'
- {
- $$ = $2;
- }
-
-adlist:
- xdecor
- {
- dodecl(adecl, lastclass, lasttype, $1);
- $$ = Z;
- }
-| xdecor
- {
- $1 = dodecl(adecl, lastclass, lasttype, $1);
- }
- '=' init
- {
- int32 w;
-
- w = $1->sym->type->width;
- $$ = doinit($1->sym, $1->type, 0L, $4);
- $$ = contig($1->sym, $$, w);
- }
-| adlist ',' adlist
- {
- $$ = $1;
- if($3 != Z) {
- $$ = $3;
- if($1 != Z)
- $$ = new(OLIST, $1, $3);
- }
- }
-
-/*
- * parameter declarator
- */
-pdecl:
-| pdecl ctlist pdlist ';'
-
-pdlist:
- xdecor
- {
- dodecl(pdecl, lastclass, lasttype, $1);
- }
-| pdlist ',' pdlist
-
-/*
- * structure element declarator
- */
-edecl:
- tlist
- {
- lasttype = $1;
- }
- zedlist ';'
-| edecl tlist
- {
- lasttype = $2;
- }
- zedlist ';'
-
-zedlist: /* extension */
- {
- lastfield = 0;
- edecl(CXXX, lasttype, S);
- }
-| edlist
-
-edlist:
- edecor
- {
- dodecl(edecl, CXXX, lasttype, $1);
- }
-| edlist ',' edlist
-
-edecor:
- xdecor
- {
- lastbit = 0;
- firstbit = 1;
- }
-| tag ':' lexpr
- {
- $$ = new(OBIT, $1, $3);
- }
-| ':' lexpr
- {
- $$ = new(OBIT, Z, $2);
- }
-
-/*
- * abstract declarator
- */
-abdecor:
- {
- $$ = (Z);
- }
-| abdecor1
-
-abdecor1:
- '*' zgnlist
- {
- $$ = new(OIND, (Z), Z);
- $$->garb = simpleg($2);
- }
-| '*' zgnlist abdecor1
- {
- $$ = new(OIND, $3, Z);
- $$->garb = simpleg($2);
- }
-| abdecor2
-
-abdecor2:
- abdecor3
-| abdecor2 '(' zarglist ')'
- {
- $$ = new(OFUNC, $1, $3);
- }
-| abdecor2 '[' zexpr ']'
- {
- $$ = new(OARRAY, $1, $3);
- }
-
-abdecor3:
- '(' ')'
- {
- $$ = new(OFUNC, (Z), Z);
- }
-| '[' zexpr ']'
- {
- $$ = new(OARRAY, (Z), $2);
- }
-| '(' abdecor1 ')'
- {
- $$ = $2;
- }
-
-init:
- expr
-| '{' ilist '}'
- {
- $$ = new(OINIT, invert($2), Z);
- }
-
-qual:
- '[' lexpr ']'
- {
- $$ = new(OARRAY, $2, Z);
- }
-| '.' ltag
- {
- $$ = new(OELEM, Z, Z);
- $$->sym = $2;
- }
-| qual '='
-
-qlist:
- init ','
-| qlist init ','
- {
- $$ = new(OLIST, $1, $2);
- }
-| qual
-| qlist qual
- {
- $$ = new(OLIST, $1, $2);
- }
-
-ilist:
- qlist
-| init
-| qlist init
- {
- $$ = new(OLIST, $1, $2);
- }
-
-zarglist:
- {
- $$ = Z;
- }
-| arglist
- {
- $$ = invert($1);
- }
-
-
-arglist:
- name
-| tlist abdecor
- {
- $$ = new(OPROTO, $2, Z);
- $$->type = $1;
- }
-| tlist xdecor
- {
- $$ = new(OPROTO, $2, Z);
- $$->type = $1;
- }
-| '.' '.' '.'
- {
- $$ = new(ODOTDOT, Z, Z);
- }
-| arglist ',' arglist
- {
- $$ = new(OLIST, $1, $3);
- }
-
-block:
- '{' slist '}'
- {
- $$ = invert($2);
- // if($2 != Z)
- // $$ = new(OLIST, $2, $$);
- if($$ == Z)
- $$ = new(OLIST, Z, Z);
- }
-
-slist:
- {
- $$ = Z;
- }
-| slist adecl
- {
- $$ = new(OLIST, $1, $2);
- }
-| slist stmnt
- {
- $$ = new(OLIST, $1, $2);
- }
-
-labels:
- label
-| labels label
- {
- $$ = new(OLIST, $1, $2);
- }
-
-label:
- LCASE expr ':'
- {
- $$ = new(OCASE, $2, Z);
- }
-| LDEFAULT ':'
- {
- $$ = new(OCASE, Z, Z);
- }
-| LNAME ':'
- {
- $$ = new(OLABEL, dcllabel($1, 1), Z);
- }
-
-stmnt:
- error ';'
- {
- $$ = Z;
- }
-| ulstmnt
-| labels ulstmnt
- {
- $$ = new(OLIST, $1, $2);
- }
-
-forexpr:
- zcexpr
-| ctlist adlist
- {
- $$ = $2;
- }
-
-ulstmnt:
- zcexpr ';'
-| {
- markdcl();
- }
- block
- {
- $$ = revertdcl();
- if($$)
- $$ = new(OLIST, $$, $2);
- else
- $$ = $2;
- }
-| LIF '(' cexpr ')' stmnt
- {
- $$ = new(OIF, $3, new(OLIST, $5, Z));
- if($5 == Z)
- warn($3, "empty if body");
- }
-| LIF '(' cexpr ')' stmnt LELSE stmnt
- {
- $$ = new(OIF, $3, new(OLIST, $5, $7));
- if($5 == Z)
- warn($3, "empty if body");
- if($7 == Z)
- warn($3, "empty else body");
- }
-| { markdcl(); } LFOR '(' forexpr ';' zcexpr ';' zcexpr ')' stmnt
- {
- $$ = revertdcl();
- if($$){
- if($4)
- $4 = new(OLIST, $$, $4);
- else
- $4 = $$;
- }
- $$ = new(OFOR, new(OLIST, $6, new(OLIST, $4, $8)), $10);
- }
-| LWHILE '(' cexpr ')' stmnt
- {
- $$ = new(OWHILE, $3, $5);
- }
-| LDO stmnt LWHILE '(' cexpr ')' ';'
- {
- $$ = new(ODWHILE, $5, $2);
- }
-| LRETURN zcexpr ';'
- {
- $$ = new(ORETURN, $2, Z);
- $$->type = thisfn->link;
- }
-| LSWITCH '(' cexpr ')' stmnt
- {
- $$ = new(OCONST, Z, Z);
- $$->vconst = 0;
- $$->type = types[TINT];
- $3 = new(OSUB, $$, $3);
-
- $$ = new(OCONST, Z, Z);
- $$->vconst = 0;
- $$->type = types[TINT];
- $3 = new(OSUB, $$, $3);
-
- $$ = new(OSWITCH, $3, $5);
- }
-| LBREAK ';'
- {
- $$ = new(OBREAK, Z, Z);
- }
-| LCONTINUE ';'
- {
- $$ = new(OCONTINUE, Z, Z);
- }
-| LGOTO ltag ';'
- {
- $$ = new(OGOTO, dcllabel($2, 0), Z);
- }
-| LUSED '(' zelist ')' ';'
- {
- $$ = new(OUSED, $3, Z);
- }
-| LPREFETCH '(' zelist ')' ';'
- {
- $$ = new(OPREFETCH, $3, Z);
- }
-| LSET '(' zelist ')' ';'
- {
- $$ = new(OSET, $3, Z);
- }
-
-zcexpr:
- {
- $$ = Z;
- }
-| cexpr
-
-zexpr:
- {
- $$ = Z;
- }
-| lexpr
-
-lexpr:
- expr
- {
- $$ = new(OCAST, $1, Z);
- $$->type = types[TLONG];
- }
-
-cexpr:
- expr
-| cexpr ',' cexpr
- {
- $$ = new(OCOMMA, $1, $3);
- }
-
-expr:
- xuexpr
-| expr '*' expr
- {
- $$ = new(OMUL, $1, $3);
- }
-| expr '/' expr
- {
- $$ = new(ODIV, $1, $3);
- }
-| expr '%' expr
- {
- $$ = new(OMOD, $1, $3);
- }
-| expr '+' expr
- {
- $$ = new(OADD, $1, $3);
- }
-| expr '-' expr
- {
- $$ = new(OSUB, $1, $3);
- }
-| expr LRSH expr
- {
- $$ = new(OASHR, $1, $3);
- }
-| expr LLSH expr
- {
- $$ = new(OASHL, $1, $3);
- }
-| expr '<' expr
- {
- $$ = new(OLT, $1, $3);
- }
-| expr '>' expr
- {
- $$ = new(OGT, $1, $3);
- }
-| expr LLE expr
- {
- $$ = new(OLE, $1, $3);
- }
-| expr LGE expr
- {
- $$ = new(OGE, $1, $3);
- }
-| expr LEQ expr
- {
- $$ = new(OEQ, $1, $3);
- }
-| expr LNE expr
- {
- $$ = new(ONE, $1, $3);
- }
-| expr '&' expr
- {
- $$ = new(OAND, $1, $3);
- }
-| expr '^' expr
- {
- $$ = new(OXOR, $1, $3);
- }
-| expr '|' expr
- {
- $$ = new(OOR, $1, $3);
- }
-| expr LANDAND expr
- {
- $$ = new(OANDAND, $1, $3);
- }
-| expr LOROR expr
- {
- $$ = new(OOROR, $1, $3);
- }
-| expr '?' cexpr ':' expr
- {
- $$ = new(OCOND, $1, new(OLIST, $3, $5));
- }
-| expr '=' expr
- {
- $$ = new(OAS, $1, $3);
- }
-| expr LPE expr
- {
- $$ = new(OASADD, $1, $3);
- }
-| expr LME expr
- {
- $$ = new(OASSUB, $1, $3);
- }
-| expr LMLE expr
- {
- $$ = new(OASMUL, $1, $3);
- }
-| expr LDVE expr
- {
- $$ = new(OASDIV, $1, $3);
- }
-| expr LMDE expr
- {
- $$ = new(OASMOD, $1, $3);
- }
-| expr LLSHE expr
- {
- $$ = new(OASASHL, $1, $3);
- }
-| expr LRSHE expr
- {
- $$ = new(OASASHR, $1, $3);
- }
-| expr LANDE expr
- {
- $$ = new(OASAND, $1, $3);
- }
-| expr LXORE expr
- {
- $$ = new(OASXOR, $1, $3);
- }
-| expr LORE expr
- {
- $$ = new(OASOR, $1, $3);
- }
-
-xuexpr:
- uexpr
-| '(' tlist abdecor ')' xuexpr
- {
- $$ = new(OCAST, $5, Z);
- dodecl(NODECL, CXXX, $2, $3);
- $$->type = lastdcl;
- $$->xcast = 1;
- }
-| '(' tlist abdecor ')' '{' ilist '}' /* extension */
- {
- $$ = new(OSTRUCT, $6, Z);
- dodecl(NODECL, CXXX, $2, $3);
- $$->type = lastdcl;
- }
-
-uexpr:
- pexpr
-| '*' xuexpr
- {
- $$ = new(OIND, $2, Z);
- }
-| '&' xuexpr
- {
- $$ = new(OADDR, $2, Z);
- }
-| '+' xuexpr
- {
- $$ = new(OPOS, $2, Z);
- }
-| '-' xuexpr
- {
- $$ = new(ONEG, $2, Z);
- }
-| '!' xuexpr
- {
- $$ = new(ONOT, $2, Z);
- }
-| '~' xuexpr
- {
- $$ = new(OCOM, $2, Z);
- }
-| LPP xuexpr
- {
- $$ = new(OPREINC, $2, Z);
- }
-| LMM xuexpr
- {
- $$ = new(OPREDEC, $2, Z);
- }
-| LSIZEOF uexpr
- {
- $$ = new(OSIZE, $2, Z);
- }
-| LSIGNOF uexpr
- {
- $$ = new(OSIGN, $2, Z);
- }
-
-pexpr:
- '(' cexpr ')'
- {
- $$ = $2;
- }
-| LSIZEOF '(' tlist abdecor ')'
- {
- $$ = new(OSIZE, Z, Z);
- dodecl(NODECL, CXXX, $3, $4);
- $$->type = lastdcl;
- }
-| LSIGNOF '(' tlist abdecor ')'
- {
- $$ = new(OSIGN, Z, Z);
- dodecl(NODECL, CXXX, $3, $4);
- $$->type = lastdcl;
- }
-| pexpr '(' zelist ')'
- {
- $$ = new(OFUNC, $1, Z);
- if($1->op == ONAME)
- if($1->type == T)
- dodecl(xdecl, CXXX, types[TINT], $$);
- $$->right = invert($3);
- }
-| pexpr '[' cexpr ']'
- {
- $$ = new(OIND, new(OADD, $1, $3), Z);
- }
-| pexpr LMG ltag
- {
- $$ = new(ODOT, new(OIND, $1, Z), Z);
- $$->sym = $3;
- }
-| pexpr '.' ltag
- {
- $$ = new(ODOT, $1, Z);
- $$->sym = $3;
- }
-| pexpr LPP
- {
- $$ = new(OPOSTINC, $1, Z);
- }
-| pexpr LMM
- {
- $$ = new(OPOSTDEC, $1, Z);
- }
-| name
-| LCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TINT];
- $$->vconst = $1;
- $$->cstring = strdup(symb);
- }
-| LLCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TLONG];
- $$->vconst = $1;
- $$->cstring = strdup(symb);
- }
-| LUCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TUINT];
- $$->vconst = $1;
- $$->cstring = strdup(symb);
- }
-| LULCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TULONG];
- $$->vconst = $1;
- $$->cstring = strdup(symb);
- }
-| LDCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TDOUBLE];
- $$->fconst = $1;
- $$->cstring = strdup(symb);
- }
-| LFCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TFLOAT];
- $$->fconst = $1;
- $$->cstring = strdup(symb);
- }
-| LVLCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TVLONG];
- $$->vconst = $1;
- $$->cstring = strdup(symb);
- }
-| LUVLCONST
- {
- $$ = new(OCONST, Z, Z);
- $$->type = types[TUVLONG];
- $$->vconst = $1;
- $$->cstring = strdup(symb);
- }
-| string
-| lstring
-
-string:
- LSTRING
- {
- $$ = new(OSTRING, Z, Z);
- $$->type = typ(TARRAY, types[TCHAR]);
- $$->type->width = $1.l + 1;
- $$->cstring = $1.s;
- $$->sym = symstring;
- $$->etype = TARRAY;
- $$->class = CSTATIC;
- }
-| string LSTRING
- {
- char *s;
- int n;
-
- n = $1->type->width - 1;
- s = alloc(n+$2.l+MAXALIGN);
-
- memcpy(s, $1->cstring, n);
- memcpy(s+n, $2.s, $2.l);
- s[n+$2.l] = 0;
-
- $$ = $1;
- $$->type->width += $2.l;
- $$->cstring = s;
- }
-
-lstring:
- LLSTRING
- {
- $$ = new(OLSTRING, Z, Z);
- $$->type = typ(TARRAY, types[TRUNE]);
- $$->type->width = $1.l + sizeof(TRune);
- $$->rstring = (TRune*)$1.s;
- $$->sym = symstring;
- $$->etype = TARRAY;
- $$->class = CSTATIC;
- }
-| lstring LLSTRING
- {
- char *s;
- int n;
-
- n = $1->type->width - sizeof(TRune);
- s = alloc(n+$2.l+MAXALIGN);
-
- memcpy(s, $1->rstring, n);
- memcpy(s+n, $2.s, $2.l);
- *(TRune*)(s+n+$2.l) = 0;
-
- $$ = $1;
- $$->type->width += $2.l;
- $$->rstring = (TRune*)s;
- }
-
-zelist:
- {
- $$ = Z;
- }
-| elist
-
-elist:
- expr
-| elist ',' elist
- {
- $$ = new(OLIST, $1, $3);
- }
-
-sbody:
- '{'
- {
- $<tyty>$.t1 = strf;
- $<tyty>$.t2 = strl;
- $<tyty>$.t3 = lasttype;
- $<tyty>$.c = lastclass;
- strf = T;
- strl = T;
- lastbit = 0;
- firstbit = 1;
- lastclass = CXXX;
- lasttype = T;
- }
- edecl '}'
- {
- $$ = strf;
- strf = $<tyty>2.t1;
- strl = $<tyty>2.t2;
- lasttype = $<tyty>2.t3;
- lastclass = $<tyty>2.c;
- }
-
-zctlist:
- {
- lastclass = CXXX;
- lasttype = types[TINT];
- }
-| ctlist
-
-types:
- complex
- {
- $$.t = $1;
- $$.c = CXXX;
- }
-| tname
- {
- $$.t = simplet($1);
- $$.c = CXXX;
- }
-| gcnlist
- {
- $$.t = simplet($1);
- $$.c = simplec($1);
- $$.t = garbt($$.t, $1);
- }
-| complex gctnlist
- {
- $$.t = $1;
- $$.c = simplec($2);
- $$.t = garbt($$.t, $2);
- if($2 & ~BCLASS & ~BGARB)
- diag(Z, "duplicate types given: %T and %Q", $1, $2);
- }
-| tname gctnlist
- {
- $$.t = simplet(typebitor($1, $2));
- $$.c = simplec($2);
- $$.t = garbt($$.t, $2);
- }
-| gcnlist complex zgnlist
- {
- $$.t = $2;
- $$.c = simplec($1);
- $$.t = garbt($$.t, $1|$3);
- }
-| gcnlist tname
- {
- $$.t = simplet($2);
- $$.c = simplec($1);
- $$.t = garbt($$.t, $1);
- }
-| gcnlist tname gctnlist
- {
- $$.t = simplet(typebitor($2, $3));
- $$.c = simplec($1|$3);
- $$.t = garbt($$.t, $1|$3);
- }
-
-tlist:
- types
- {
- $$ = $1.t;
- if($1.c != CXXX)
- diag(Z, "illegal combination of class 4: %s", cnames[$1.c]);
- }
-
-ctlist:
- types
- {
- lasttype = $1.t;
- lastclass = $1.c;
- }
-
-complex:
- LSTRUCT ltag
- {
- dotag($2, TSTRUCT, 0);
- $$ = $2->suetag;
- }
-| LSTRUCT ltag
- {
- dotag($2, TSTRUCT, autobn);
- }
- sbody
- {
- $$ = $2->suetag;
- if($$->link != T)
- diag(Z, "redeclare tag: %s", $2->name);
- $$->link = $4;
- sualign($$);
- }
-| LSTRUCT sbody
- {
- diag(Z, "struct must have tag");
- taggen++;
- sprint(symb, "_%d_", taggen);
- $$ = dotag(lookup(), TSTRUCT, autobn);
- $$->link = $2;
- sualign($$);
- }
-| LUNION ltag
- {
- dotag($2, TUNION, 0);
- $$ = $2->suetag;
- }
-| LUNION ltag
- {
- dotag($2, TUNION, autobn);
- }
- sbody
- {
- $$ = $2->suetag;
- if($$->link != T)
- diag(Z, "redeclare tag: %s", $2->name);
- $$->link = $4;
- sualign($$);
- }
-| LUNION sbody
- {
- taggen++;
- sprint(symb, "_%d_", taggen);
- $$ = dotag(lookup(), TUNION, autobn);
- $$->link = $2;
- sualign($$);
- }
-| LENUM ltag
- {
- dotag($2, TENUM, 0);
- $$ = $2->suetag;
- if($$->link == T)
- $$->link = types[TINT];
- $$ = $$->link;
- }
-| LENUM ltag
- {
- dotag($2, TENUM, autobn);
- }
- '{'
- {
- en.tenum = T;
- en.cenum = T;
- }
- enum '}'
- {
- $$ = $2->suetag;
- if($$->link != T)
- diag(Z, "redeclare tag: %s", $2->name);
- if(en.tenum == T) {
- diag(Z, "enum type ambiguous: %s", $2->name);
- en.tenum = types[TINT];
- }
- $$->link = en.tenum;
- $$ = en.tenum;
- }
-| LENUM '{'
- {
- en.tenum = T;
- en.cenum = T;
- }
- enum '}'
- {
- $$ = en.tenum;
- }
-| LTYPE
- {
- $$ = tcopy($1->type);
- }
-
-gctnlist:
- gctname
-| gctnlist gctname
- {
- $$ = typebitor($1, $2);
- }
-
-zgnlist:
- {
- $$ = 0;
- }
-| zgnlist gname
- {
- $$ = typebitor($1, $2);
- }
-
-gctname:
- tname
-| gname
-| cname
-
-gcnlist:
- gcname
-| gcnlist gcname
- {
- $$ = typebitor($1, $2);
- }
-
-gcname:
- gname
-| cname
-
-enum:
- LNAME
- {
- doenum($1, Z);
- }
-| LNAME '=' expr
- {
- doenum($1, $3);
- }
-| enum ','
-| enum ',' enum
-
-tname: /* type words */
- LCHAR { $$ = BCHAR; }
-| LSHORT { $$ = BSHORT; }
-| LINT { $$ = BINT; }
-| LLONG { $$ = BLONG; }
-| LSIGNED { $$ = BSIGNED; }
-| LUNSIGNED { $$ = BUNSIGNED; }
-| LFLOAT { $$ = BFLOAT; }
-| LDOUBLE { $$ = BDOUBLE; }
-| LVOID { $$ = BVOID; }
-
-cname: /* class words */
- LAUTO { $$ = BAUTO; }
-| LSTATIC { $$ = BSTATIC; }
-| LEXTERN { $$ = BEXTERN; }
-| LTYPEDEF { $$ = BTYPEDEF; }
-| LTYPESTR { $$ = BTYPESTR; }
-| LREGISTER { $$ = BREGISTER; }
-| LINLINE { $$ = 0; }
-
-gname: /* garbage words */
- LCONSTNT { $$ = BCONSTNT; }
-| LVOLATILE { $$ = BVOLATILE; }
-| LRESTRICT { $$ = 0; }
-
-name:
- LNAME
- {
- $$ = new(ONAME, Z, Z);
- if($1->class == CLOCAL)
- $1 = mkstatic($1);
- $$->sym = $1;
- $$->type = $1->type;
- $$->etype = TVOID;
- if($$->type != T)
- $$->etype = $$->type->etype;
- $$->xoffset = $1->offset;
- $$->class = $1->class;
- $1->aused = 1;
- }
-tag:
- ltag
- {
- $$ = new(ONAME, Z, Z);
- $$->sym = $1;
- $$->type = $1->type;
- $$->etype = TVOID;
- if($$->type != T)
- $$->etype = $$->type->etype;
- $$->xoffset = $1->offset;
- $$->class = $1->class;
- }
-ltag:
- LNAME
-| LTYPE
-%%
diff --git a/src/cmd/cc/com.c b/src/cmd/cc/com.c
deleted file mode 100644
index 4886b73eb..000000000
--- a/src/cmd/cc/com.c
+++ /dev/null
@@ -1,1384 +0,0 @@
-// Inferno utils/cc/com.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/com.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-int compar(Node*, int);
-
-void
-complex(Node *n)
-{
-
- if(n == Z)
- return;
-
- nearln = n->lineno;
- if(debug['t'])
- if(n->op != OCONST)
- prtree(n, "pre complex");
- if(tcom(n))
- return;
- if(debug['t'])
- if(n->op != OCONST)
- prtree(n, "t complex");
- ccom(n);
- if(debug['t'])
- if(n->op != OCONST)
- prtree(n, "c complex");
- acom(n);
- if(debug['t'])
- if(n->op != OCONST)
- prtree(n, "a complex");
- xcom(n);
- if(debug['t'])
- if(n->op != OCONST)
- prtree(n, "x complex");
-}
-
-/*
- * evaluate types
- * evaluate lvalues (addable == 1)
- */
-enum
-{
- ADDROF = 1<<0,
- CASTOF = 1<<1,
- ADDROP = 1<<2,
-};
-
-int
-tcom(Node *n)
-{
-
- return tcomo(n, ADDROF);
-}
-
-int
-tcomo(Node *n, int f)
-{
- Node *l, *r;
- Type *t;
- int o;
- static TRune zer;
-
- if(n == Z) {
- diag(Z, "Z in tcom");
- errorexit();
- }
- n->addable = 0;
- l = n->left;
- r = n->right;
-
- switch(n->op) {
- default:
- diag(n, "unknown op in type complex: %O", n->op);
- goto bad;
-
- case ODOTDOT:
- /*
- * tcom has already been called on this subtree
- */
- *n = *n->left;
- if(n->type == T)
- goto bad;
- break;
-
- case OCAST:
- if(n->type == T)
- break;
- if(n->type->width == types[TLONG]->width) {
- if(tcomo(l, ADDROF|CASTOF))
- goto bad;
- } else
- if(tcom(l))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, n->type, tcast))
- goto bad;
- break;
-
- case ORETURN:
- if(l == Z) {
- if(n->type->etype != TVOID)
- diag(n, "null return of a typed function");
- break;
- }
- if(tcom(l))
- goto bad;
- typeext(n->type, l);
- if(tcompat(n, n->type, l->type, tasign))
- break;
- constas(n, n->type, l->type);
- if(!sametype(n->type, l->type)) {
- l = new1(OCAST, l, Z);
- l->type = n->type;
- n->left = l;
- }
- break;
-
- case OASI: /* same as OAS, but no test for const */
- n->op = OAS;
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
-
- typeext(l->type, r);
- if(tlvalue(l) || tcompat(n, l->type, r->type, tasign))
- goto bad;
- if(!sametype(l->type, r->type)) {
- r = new1(OCAST, r, Z);
- r->type = l->type;
- n->right = r;
- }
- n->type = l->type;
- break;
-
- case OAS:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(isfunct(n))
- break;
- typeext(l->type, r);
- if(tcompat(n, l->type, r->type, tasign))
- goto bad;
- constas(n, l->type, r->type);
- if(!sametype(l->type, r->type)) {
- r = new1(OCAST, r, Z);
- r->type = l->type;
- n->right = r;
- }
- n->type = l->type;
- break;
-
- case OASADD:
- case OASSUB:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(isfunct(n))
- break;
- typeext1(l->type, r);
- if(tcompat(n, l->type, r->type, tasadd))
- goto bad;
- constas(n, l->type, r->type);
- t = l->type;
- arith(n, 0);
- while(n->left->op == OCAST)
- n->left = n->left->left;
- if(!sametype(t, n->type) && !mixedasop(t, n->type)) {
- r = new1(OCAST, n->right, Z);
- r->type = t;
- n->right = r;
- n->type = t;
- }
- break;
-
- case OASMUL:
- case OASLMUL:
- case OASDIV:
- case OASLDIV:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(isfunct(n))
- break;
- typeext1(l->type, r);
- if(tcompat(n, l->type, r->type, tmul))
- goto bad;
- constas(n, l->type, r->type);
- t = l->type;
- arith(n, 0);
- while(n->left->op == OCAST)
- n->left = n->left->left;
- if(!sametype(t, n->type) && !mixedasop(t, n->type)) {
- r = new1(OCAST, n->right, Z);
- r->type = t;
- n->right = r;
- n->type = t;
- }
- if(typeu[n->type->etype]) {
- if(n->op == OASDIV)
- n->op = OASLDIV;
- if(n->op == OASMUL)
- n->op = OASLMUL;
- }
- break;
-
- case OASLSHR:
- case OASASHR:
- case OASASHL:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tand))
- goto bad;
- n->type = l->type;
- if(typeu[n->type->etype]) {
- if(n->op == OASASHR)
- n->op = OASLSHR;
- }
- break;
-
- case OASMOD:
- case OASLMOD:
- case OASOR:
- case OASAND:
- case OASXOR:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tand))
- goto bad;
- t = l->type;
- arith(n, 0);
- while(n->left->op == OCAST)
- n->left = n->left->left;
- if(!sametype(t, n->type) && !mixedasop(t, n->type)) {
- r = new1(OCAST, n->right, Z);
- r->type = t;
- n->right = r;
- n->type = t;
- }
- if(typeu[n->type->etype]) {
- if(n->op == OASMOD)
- n->op = OASLMOD;
- }
- break;
-
- case OPREINC:
- case OPREDEC:
- case OPOSTINC:
- case OPOSTDEC:
- if(tcom(l))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, types[TINT], tadd))
- goto bad;
- n->type = l->type;
- if(n->type->etype == TIND)
- if(n->type->link->width < 1)
- diag(n, "inc/dec of a void pointer");
- break;
-
- case OEQ:
- case ONE:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- typeext(l->type, r);
- typeext(r->type, l);
- if(tcompat(n, l->type, r->type, trel))
- goto bad;
- arith(n, 0);
- n->type = types[TINT];
- break;
-
- case OLT:
- case OGE:
- case OGT:
- case OLE:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- typeext1(l->type, r);
- typeext1(r->type, l);
- if(tcompat(n, l->type, r->type, trel))
- goto bad;
- arith(n, 0);
- if(typeu[n->type->etype])
- n->op = logrel[relindex(n->op)];
- n->type = types[TINT];
- break;
-
- case OCOND:
- o = tcom(l);
- o |= tcom(r->left);
- if(o | tcom(r->right))
- goto bad;
- if(r->right->type->etype == TIND && vconst(r->left) == 0) {
- r->left->type = r->right->type;
- r->left->vconst = 0;
- }
- if(r->left->type->etype == TIND && vconst(r->right) == 0) {
- r->right->type = r->left->type;
- r->right->vconst = 0;
- }
- if(sametype(r->right->type, r->left->type)) {
- r->type = r->right->type;
- n->type = r->type;
- break;
- }
- if(tcompat(r, r->left->type, r->right->type, trel))
- goto bad;
- arith(r, 0);
- n->type = r->type;
- break;
-
- case OADD:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tadd))
- goto bad;
- arith(n, 1);
- break;
-
- case OSUB:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tsub))
- goto bad;
- arith(n, 1);
- break;
-
- case OMUL:
- case OLMUL:
- case ODIV:
- case OLDIV:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tmul))
- goto bad;
- arith(n, 1);
- if(typeu[n->type->etype]) {
- if(n->op == ODIV)
- n->op = OLDIV;
- if(n->op == OMUL)
- n->op = OLMUL;
- }
- break;
-
- case OLSHR:
- case OASHL:
- case OASHR:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tand))
- goto bad;
- n->right = Z;
- arith(n, 1);
- n->right = new1(OCAST, r, Z);
- n->right->type = types[TINT];
- if(typeu[n->type->etype])
- if(n->op == OASHR)
- n->op = OLSHR;
- break;
-
- case OAND:
- case OOR:
- case OXOR:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tand))
- goto bad;
- arith(n, 1);
- break;
-
- case OMOD:
- case OLMOD:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, l->type, r->type, tand))
- goto bad;
- arith(n, 1);
- if(typeu[n->type->etype])
- n->op = OLMOD;
- break;
-
- case OPOS:
- if(tcom(l))
- goto bad;
- if(isfunct(n))
- break;
-
- r = l;
- l = new(OCONST, Z, Z);
- l->vconst = 0;
- l->type = types[TINT];
- n->op = OADD;
- n->right = r;
- n->left = l;
-
- if(tcom(l))
- goto bad;
- if(tcompat(n, l->type, r->type, tsub))
- goto bad;
- arith(n, 1);
- break;
-
- case ONEG:
- if(tcom(l))
- goto bad;
- if(isfunct(n))
- break;
-
- if(!machcap(n)) {
- r = l;
- l = new(OCONST, Z, Z);
- l->vconst = 0;
- l->type = types[TINT];
- n->op = OSUB;
- n->right = r;
- n->left = l;
-
- if(tcom(l))
- goto bad;
- if(tcompat(n, l->type, r->type, tsub))
- goto bad;
- }
- arith(n, 1);
- break;
-
- case OCOM:
- if(tcom(l))
- goto bad;
- if(isfunct(n))
- break;
-
- if(!machcap(n)) {
- r = l;
- l = new(OCONST, Z, Z);
- l->vconst = -1;
- l->type = types[TINT];
- n->op = OXOR;
- n->right = r;
- n->left = l;
-
- if(tcom(l))
- goto bad;
- if(tcompat(n, l->type, r->type, tand))
- goto bad;
- }
- arith(n, 1);
- break;
-
- case ONOT:
- if(tcom(l))
- goto bad;
- if(isfunct(n))
- break;
- if(tcompat(n, T, l->type, tnot))
- goto bad;
- n->type = types[TINT];
- break;
-
- case OANDAND:
- case OOROR:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- if(tcompat(n, T, l->type, tnot) |
- tcompat(n, T, r->type, tnot))
- goto bad;
- n->type = types[TINT];
- break;
-
- case OCOMMA:
- o = tcom(l);
- if(o | tcom(r))
- goto bad;
- n->type = r->type;
- break;
-
-
- case OSIGN: /* extension signof(type) returns a hash */
- if(l != Z) {
- if(l->op != OSTRING && l->op != OLSTRING)
- if(tcomo(l, 0))
- goto bad;
- if(l->op == OBIT) {
- diag(n, "signof bitfield");
- goto bad;
- }
- n->type = l->type;
- }
- if(n->type == T)
- goto bad;
- if(n->type->width < 0) {
- diag(n, "signof undefined type");
- goto bad;
- }
- n->op = OCONST;
- n->left = Z;
- n->right = Z;
- n->vconst = convvtox(signature(n->type), TULONG);
- n->type = types[TULONG];
- break;
-
- case OSIZE:
- if(l != Z) {
- if(l->op != OSTRING && l->op != OLSTRING)
- if(tcomo(l, 0))
- goto bad;
- if(l->op == OBIT) {
- diag(n, "sizeof bitfield");
- goto bad;
- }
- n->type = l->type;
- }
- if(n->type == T)
- goto bad;
- if(n->type->width <= 0) {
- diag(n, "sizeof undefined type");
- goto bad;
- }
- if(n->type->etype == TFUNC) {
- diag(n, "sizeof function");
- goto bad;
- }
- n->op = OCONST;
- n->left = Z;
- n->right = Z;
- n->vconst = convvtox(n->type->width, TINT);
- n->type = types[TINT];
- break;
-
- case OFUNC:
- o = tcomo(l, 0);
- if(o)
- goto bad;
- if(l->type->etype == TIND && l->type->link->etype == TFUNC) {
- l = new1(OIND, l, Z);
- l->type = l->left->type->link;
- n->left = l;
- }
- if(tcompat(n, T, l->type, tfunct))
- goto bad;
- if(o | tcoma(l, r, l->type->down, 1))
- goto bad;
- n->type = l->type->link;
- if(!debug['B'])
- if(l->type->down == T || l->type->down->etype == TOLD) {
- nerrors--;
- diag(n, "function args not checked: %F", l);
- }
- dpcheck(n);
- break;
-
- case ONAME:
- if(n->type == T) {
- diag(n, "name not declared: %F", n);
- goto bad;
- }
- if(n->type->etype == TENUM) {
- n->op = OCONST;
- n->type = n->sym->tenum;
- if(!typefd[n->type->etype])
- n->vconst = n->sym->vconst;
- else
- n->fconst = n->sym->fconst;
- break;
- }
- n->addable = 1;
- if(n->class == CEXREG) {
- n->op = OREGISTER;
- // on 386 or amd64, "extern register" generates
- // memory references relative to the
- // gs or fs segment.
- if(thechar == '8' || thechar == '6') // [sic]
- n->op = OEXREG;
- n->reg = n->sym->offset;
- n->xoffset = 0;
- break;
- }
- break;
-
- case OLSTRING:
- if(n->type->link != types[TRUNE]) {
- o = outstring(0, 0);
- while(o & 3) {
- outlstring(&zer, sizeof(TRune));
- o = outlstring(0, 0);
- }
- }
- n->op = ONAME;
- n->xoffset = outlstring(n->rstring, n->type->width);
- n->addable = 1;
- break;
-
- case OSTRING:
- if(n->type->link != types[TCHAR]) {
- o = outstring(0, 0);
- while(o & 3) {
- outstring("", 1);
- o = outstring(0, 0);
- }
- }
- n->op = ONAME;
- n->xoffset = outstring(n->cstring, n->type->width);
- n->addable = 1;
- break;
-
- case OCONST:
- break;
-
- case ODOT:
- if(tcom(l))
- goto bad;
- if(tcompat(n, T, l->type, tdot))
- goto bad;
- if(tcomd(n))
- goto bad;
- break;
-
- case OADDR:
- if(tcomo(l, ADDROP))
- goto bad;
- if(tlvalue(l))
- goto bad;
- if(l->type->nbits) {
- diag(n, "address of a bit field");
- goto bad;
- }
- if(l->op == OREGISTER) {
- diag(n, "address of a register");
- goto bad;
- }
- n->type = typ(TIND, l->type);
- n->type->width = types[TIND]->width;
- break;
-
- case OIND:
- if(tcom(l))
- goto bad;
- if(tcompat(n, T, l->type, tindir))
- goto bad;
- n->type = l->type->link;
- n->addable = 1;
- break;
-
- case OSTRUCT:
- if(tcomx(n))
- goto bad;
- break;
- }
- t = n->type;
- if(t == T)
- goto bad;
- if(t->width < 0) {
- snap(t);
- if(t->width < 0) {
- if(typesu[t->etype] && t->tag)
- diag(n, "structure not fully declared %s", t->tag->name);
- else
- diag(n, "structure not fully declared");
- goto bad;
- }
- }
- if(typeaf[t->etype]) {
- if(f & ADDROF)
- goto addaddr;
- if(f & ADDROP)
- warn(n, "address of array/func ignored");
- }
- return 0;
-
-addaddr:
- if(tlvalue(n))
- goto bad;
- l = new1(OXXX, Z, Z);
- *l = *n;
- n->op = OADDR;
- if(l->type->etype == TARRAY)
- l->type = l->type->link;
- n->left = l;
- n->right = Z;
- n->addable = 0;
- n->type = typ(TIND, l->type);
- n->type->width = types[TIND]->width;
- return 0;
-
-bad:
- n->type = T;
- return 1;
-}
-
-int
-tcoma(Node *l, Node *n, Type *t, int f)
-{
- Node *n1;
- int o;
-
- if(t != T)
- if(t->etype == TOLD || t->etype == TDOT) /* .../old in prototype */
- t = T;
- if(n == Z) {
- if(t != T && !sametype(t, types[TVOID])) {
- diag(n, "not enough function arguments: %F", l);
- return 1;
- }
- return 0;
- }
- if(n->op == OLIST) {
- o = tcoma(l, n->left, t, 0);
- if(t != T) {
- t = t->down;
- if(t == T)
- t = types[TVOID];
- }
- return o | tcoma(l, n->right, t, 1);
- }
- if(f && t != T)
- tcoma(l, Z, t->down, 0);
- if(tcom(n) || tcompat(n, T, n->type, targ))
- return 1;
- if(sametype(t, types[TVOID])) {
- diag(n, "too many function arguments: %F", l);
- return 1;
- }
- if(t != T) {
- typeext(t, n);
- if(stcompat(nodproto, t, n->type, tasign)) {
- diag(l, "argument prototype mismatch \"%T\" for \"%T\": %F",
- n->type, t, l);
- return 1;
- }
-// switch(t->etype) {
-// case TCHAR:
-// case TSHORT:
-// t = types[TINT];
-// break;
-//
-// case TUCHAR:
-// case TUSHORT:
-// t = types[TUINT];
-// break;
-// }
- } else {
- switch(n->type->etype) {
- case TCHAR:
- case TSHORT:
- t = types[TINT];
- break;
-
- case TUCHAR:
- case TUSHORT:
- t = types[TUINT];
- break;
-
- case TFLOAT:
- t = types[TDOUBLE];
- }
- }
-
- if(t != T && !sametype(t, n->type)) {
- n1 = new1(OXXX, Z, Z);
- *n1 = *n;
- n->op = OCAST;
- n->left = n1;
- n->right = Z;
- n->type = t;
- n->addable = 0;
- }
- return 0;
-}
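
The fallback branch of tcoma() (the switch on n->type->etype taken when no prototype type is available) applies C's default argument promotions: char and short arguments widen to int, float widens to double. The same promotions can be observed in ordinary C through a variadic callee, which, like an unprototyped call, only ever sees promoted values; a small example, independent of this compiler:

#include <stdio.h>
#include <stdarg.h>

/* A variadic callee sees only promoted types, like an unprototyped one. */
static void
show(int n, ...)
{
	va_list ap;

	va_start(ap, n);
	/* char arrives as int, float arrives as double */
	printf("%d %f\n", va_arg(ap, int), va_arg(ap, double));
	va_end(ap);
}

int
main(void)
{
	char c = 'A';
	float f = 1.5f;

	show(2, c, f);   /* prints: 65 1.500000 */
	return 0;
}
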
-
-int
-tcomd(Node *n)
-{
- Type *t;
- int32 o;
-
- o = 0;
- t = dotsearch(n->sym, n->left->type->link, n, &o);
- if(t == T) {
- diag(n, "not a member of struct/union: %F", n);
- return 1;
- }
- makedot(n, t, o);
- return 0;
-}
-
-int
-tcomx(Node *n)
-{
- Type *t;
- Node *l, *r, **ar, **al;
- int e;
-
- e = 0;
- if(n->type->etype != TSTRUCT) {
- diag(n, "constructor must be a structure");
- return 1;
- }
- l = invert(n->left);
- n->left = l;
- al = &n->left;
- for(t = n->type->link; t != T; t = t->down) {
- if(l == Z) {
- diag(n, "constructor list too short");
- return 1;
- }
- if(l->op == OLIST) {
- r = l->left;
- ar = &l->left;
- al = &l->right;
- l = l->right;
- } else {
- r = l;
- ar = al;
- l = Z;
- }
- if(tcom(r))
- e++;
- typeext(t, r);
- if(tcompat(n, t, r->type, tasign))
- e++;
- constas(n, t, r->type);
- if(!e && !sametype(t, r->type)) {
- r = new1(OCAST, r, Z);
- r->type = t;
- *ar = r;
- }
- }
- if(l != Z) {
- diag(n, "constructor list too long");
- return 1;
- }
- return e;
-}
-
-int
-tlvalue(Node *n)
-{
-
- if(!n->addable) {
- diag(n, "not an l-value");
- return 1;
- }
- return 0;
-}
-
-/*
- * general rewrite
- * (IND(ADDR x)) ==> x
- * (ADDR(IND x)) ==> x
- * remove some zero operands
- * remove no-op casts
- * evaluate constants
- */
-void
-ccom(Node *n)
-{
- Node *l, *r;
- int t;
-
-loop:
- if(n == Z)
- return;
- l = n->left;
- r = n->right;
- switch(n->op) {
-
- case OAS:
- case OASXOR:
- case OASAND:
- case OASOR:
- case OASMOD:
- case OASLMOD:
- case OASLSHR:
- case OASASHR:
- case OASASHL:
- case OASDIV:
- case OASLDIV:
- case OASMUL:
- case OASLMUL:
- case OASSUB:
- case OASADD:
- ccom(l);
- ccom(r);
- if(n->op == OASLSHR || n->op == OASASHR || n->op == OASASHL)
- if(r->op == OCONST) {
- t = n->type->width * 8; /* bits per byte */
- if(r->vconst >= t || r->vconst < 0)
- warn(n, "stupid shift: %lld", r->vconst);
- }
- break;
-
- case OCAST:
- ccom(l);
- if(l->op == OCONST) {
- evconst(n);
- if(n->op == OCONST)
- break;
- }
- if(nocast(l->type, n->type)) {
- l->type = n->type;
- *n = *l;
- }
- break;
-
- case OCOND:
- ccom(l);
- ccom(r);
- if(l->op == OCONST)
- if(vconst(l) == 0)
- *n = *r->right;
- else
- *n = *r->left;
- break;
-
- case OREGISTER:
- case OINDREG:
- case OCONST:
- case ONAME:
- break;
-
- case OADDR:
- ccom(l);
- l->etype = TVOID;
- if(l->op == OIND) {
- l->left->type = n->type;
- *n = *l->left;
- break;
- }
- goto common;
-
- case OIND:
- ccom(l);
- if(l->op == OADDR) {
- l->left->type = n->type;
- *n = *l->left;
- break;
- }
- goto common;
-
- case OEQ:
- case ONE:
-
- case OLE:
- case OGE:
- case OLT:
- case OGT:
-
- case OLS:
- case OHS:
- case OLO:
- case OHI:
- ccom(l);
- ccom(r);
- if(compar(n, 0) || compar(n, 1))
- break;
- relcon(l, r);
- relcon(r, l);
- goto common;
-
- case OASHR:
- case OASHL:
- case OLSHR:
- ccom(l);
- if(vconst(l) == 0 && !side(r)) {
- *n = *l;
- break;
- }
- ccom(r);
- if(vconst(r) == 0) {
- *n = *l;
- break;
- }
- if(r->op == OCONST) {
- t = n->type->width * 8; /* bits per byte */
- if(r->vconst >= t || r->vconst <= -t)
- warn(n, "stupid shift: %lld", r->vconst);
- }
- goto common;
-
- case OMUL:
- case OLMUL:
- ccom(l);
- t = vconst(l);
- if(t == 0 && !side(r)) {
- *n = *l;
- break;
- }
- if(t == 1) {
- *n = *r;
- goto loop;
- }
- ccom(r);
- t = vconst(r);
- if(t == 0 && !side(l)) {
- *n = *r;
- break;
- }
- if(t == 1) {
- *n = *l;
- break;
- }
- goto common;
-
- case ODIV:
- case OLDIV:
- ccom(l);
- if(vconst(l) == 0 && !side(r)) {
- *n = *l;
- break;
- }
- ccom(r);
- t = vconst(r);
- if(t == 0) {
- diag(n, "divide check");
- *n = *r;
- break;
- }
- if(t == 1) {
- *n = *l;
- break;
- }
- goto common;
-
- case OSUB:
- ccom(r);
- if(r->op == OCONST) {
- if(typefd[r->type->etype]) {
- n->op = OADD;
- r->fconst = -r->fconst;
- goto loop;
- } else {
- n->op = OADD;
- r->vconst = -r->vconst;
- goto loop;
- }
- }
- ccom(l);
- goto common;
-
- case OXOR:
- case OOR:
- case OADD:
- ccom(l);
- if(vconst(l) == 0) {
- *n = *r;
- goto loop;
- }
- ccom(r);
- if(vconst(r) == 0) {
- *n = *l;
- break;
- }
- goto commute;
-
- case OAND:
- ccom(l);
- ccom(r);
- if(vconst(l) == 0 && !side(r)) {
- *n = *l;
- break;
- }
- if(vconst(r) == 0 && !side(l)) {
- *n = *r;
- break;
- }
-
- commute:
- /* look for commutative constant */
- if(r->op == OCONST) {
- if(l->op == n->op) {
- if(l->left->op == OCONST) {
- n->right = l->right;
- l->right = r;
- goto loop;
- }
- if(l->right->op == OCONST) {
- n->right = l->left;
- l->left = r;
- goto loop;
- }
- }
- }
- if(l->op == OCONST) {
- if(r->op == n->op) {
- if(r->left->op == OCONST) {
- n->left = r->right;
- r->right = l;
- goto loop;
- }
- if(r->right->op == OCONST) {
- n->left = r->left;
- r->left = l;
- goto loop;
- }
- }
- }
- goto common;
-
- case OANDAND:
- ccom(l);
- if(vconst(l) == 0) {
- *n = *l;
- break;
- }
- ccom(r);
- goto common;
-
- case OOROR:
- ccom(l);
- if(l->op == OCONST && l->vconst != 0) {
- *n = *l;
- n->vconst = 1;
- break;
- }
- ccom(r);
- goto common;
-
- default:
- if(l != Z)
- ccom(l);
- if(r != Z)
- ccom(r);
- common:
- if(l != Z)
- if(l->op != OCONST)
- break;
- if(r != Z)
- if(r->op != OCONST)
- break;
- evconst(n);
- }
-}
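
The commute: rotation above is easiest to see on a small example: when the right operand is a constant and the left child is the same commutative operator with a constant child, the constants are brought together so the constant folder (evconst) can combine them, turning (x + 1) + 2 into x + 3. A toy sketch of just that rewrite, whose Node type is a stand-in and not the compiler's:

#include <stdio.h>

/* Toy expression node: only what the rotation needs. */
typedef struct TNode TNode;
struct TNode {
	char   op;      /* '+', or 'c' for constant, 'x' for a variable */
	long   val;     /* constant value when op == 'c' */
	TNode *left, *right;
};

/* (x + 1) + 2  ==>  x + (1 + 2)  ==>  x + 3 */
static void
rotate(TNode *n)
{
	TNode *l = n->left, *r = n->right;

	if(n->op == '+' && r->op == 'c' && l->op == '+' && l->right->op == 'c') {
		n->right = l;          /* inner sum moves to the right */
		n->left = l->left;     /* non-constant operand is hoisted */
		l->left = l->right;    /* inner node now holds both constants */
		l->right = r;
		/* fold the two constants, as evconst would */
		n->right->op = 'c';
		n->right->val = l->left->val + r->val;
	}
}

int
main(void)
{
	TNode x = {'x'}, one = {'c', 1}, two = {'c', 2};
	TNode inner = {'+', 0, &x, &one};
	TNode root  = {'+', 0, &inner, &two};

	rotate(&root);
	printf("x + %ld\n", root.right->val);  /* prints: x + 3 */
	return 0;
}
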
-
-/* OEQ, ONE, OLE, OLS, OLT, OLO, OGE, OHS, OGT, OHI */
-static char *cmps[12] =
-{
- "==", "!=", "<=", "<=", "<", "<", ">=", ">=", ">", ">",
-};
-
-/* 128-bit numbers */
-typedef struct Big Big;
-struct Big
-{
- vlong a;
- uvlong b;
-};
-static int
-cmp(Big x, Big y)
-{
- if(x.a != y.a){
- if(x.a < y.a)
- return -1;
- return 1;
- }
- if(x.b != y.b){
- if(x.b < y.b)
- return -1;
- return 1;
- }
- return 0;
-}
-static Big
-add(Big x, int y)
-{
- uvlong ob;
-
- ob = x.b;
- x.b += y;
- if(y > 0 && x.b < ob)
- x.a++;
- if(y < 0 && x.b > ob)
- x.a--;
- return x;
-}
-
-Big
-big(vlong a, uvlong b)
-{
- Big x;
-
- x.a = a;
- x.b = b;
- return x;
-}
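
The pair (a, b) above is a 128-bit signed value: a is the high signed half, b the low unsigned half, and add() propagates a carry or borrow from the low half into the high one. A standalone restatement of just enough of these helpers to show the carry case, using plain C99 types instead of vlong/uvlong:

#include <stdio.h>

/* Same shape as Big above, with plain C99 types. */
typedef struct { long long a; unsigned long long b; } Big2;

static Big2
mkbig(long long a, unsigned long long b)
{
	Big2 x = {a, b};
	return x;
}

/* add a small int, carrying into / borrowing from the high half */
static Big2
addsmall(Big2 x, int y)
{
	unsigned long long ob = x.b;

	x.b += y;
	if(y > 0 && x.b < ob)
		x.a++;
	if(y < 0 && x.b > ob)
		x.a--;
	return x;
}

int
main(void)
{
	Big2 x = addsmall(mkbig(0, ~0ULL), 1);   /* (2^64 - 1) + 1 */

	printf("%lld %llu\n", x.a, x.b);         /* prints: 1 0 */
	return 0;
}
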
-
-int
-compar(Node *n, int reverse)
-{
- Big lo, hi, x;
- int op;
- char xbuf[40], cmpbuf[50];
- Node *l, *r;
- Type *lt, *rt;
-
- /*
- * The point of this function is to diagnose comparisons
- * that can never be true or that look misleading because
- * of the `usual arithmetic conversions'. As an example
- * of the latter, if x is a ulong, then if(x <= -1) really means
- * if(x <= 0xFFFFFFFF), while if(x <= -1LL) really means
- * what it says (but 8c compiles it wrong anyway).
- */
-
- if(reverse){
- r = n->left;
- l = n->right;
- op = comrel[relindex(n->op)];
- }else{
- l = n->left;
- r = n->right;
- op = n->op;
- }
-
- /*
- * Skip over left casts to find out the original expression range.
- */
- while(l->op == OCAST)
- l = l->left;
- if(l->op == OCONST)
- return 0;
- lt = l->type;
- if(l->op == ONAME && l->sym->type){
- lt = l->sym->type;
- if(lt->etype == TARRAY)
- lt = lt->link;
- }
- if(lt == T)
- return 0;
- if(lt->etype == TXXX || lt->etype > TUVLONG)
- return 0;
-
- /*
- * Skip over the right casts to find the on-screen value.
- */
- if(r->op != OCONST)
- return 0;
- while(r->oldop == OCAST && !r->xcast)
- r = r->left;
- rt = r->type;
- if(rt == T)
- return 0;
-
- x.b = r->vconst;
- x.a = 0;
- if((rt->etype&1) && r->vconst < 0) /* signed negative */
- x.a = ~0ULL;
-
- if((lt->etype&1)==0){
- /* unsigned */
- lo = big(0, 0);
- if(lt->width == 8)
- hi = big(0, ~0ULL);
- else
- hi = big(0, (1ULL<<(l->type->width*8))-1);
- }else{
- lo = big(~0ULL, -(1ULL<<(l->type->width*8-1)));
- hi = big(0, (1ULL<<(l->type->width*8-1))-1);
- }
-
- switch(op){
- case OLT:
- case OLO:
- case OGE:
- case OHS:
- if(cmp(x, lo) <= 0)
- goto useless;
- if(cmp(x, add(hi, 1)) >= 0)
- goto useless;
- break;
- case OLE:
- case OLS:
- case OGT:
- case OHI:
- if(cmp(x, add(lo, -1)) <= 0)
- goto useless;
- if(cmp(x, hi) >= 0)
- goto useless;
- break;
- case OEQ:
- case ONE:
- /*
- * Don't warn about comparisons if the expression
- * is as wide as the value: the compiler-supplied casts
- * will make both outcomes possible.
- */
- if(lt->width >= rt->width && debug['w'] < 2)
- return 0;
- if(cmp(x, lo) < 0 || cmp(x, hi) > 0)
- goto useless;
- break;
- }
- return 0;
-
-useless:
- if((x.a==0 && x.b<=9) || (x.a==~0LL && x.b >= -9ULL))
- snprint(xbuf, sizeof xbuf, "%lld", x.b);
- else if(x.a == 0)
- snprint(xbuf, sizeof xbuf, "%#llux", x.b);
- else
- snprint(xbuf, sizeof xbuf, "%#llx", x.b);
- if(reverse)
- snprint(cmpbuf, sizeof cmpbuf, "%s %s %T",
- xbuf, cmps[relindex(n->op)], lt);
- else
- snprint(cmpbuf, sizeof cmpbuf, "%T %s %s",
- lt, cmps[relindex(n->op)], xbuf);
- warn(n, "useless or misleading comparison: %s", cmpbuf);
- return 0;
-}
-
diff --git a/src/cmd/cc/com64.c b/src/cmd/cc/com64.c
deleted file mode 100644
index f46fedc16..000000000
--- a/src/cmd/cc/com64.c
+++ /dev/null
@@ -1,644 +0,0 @@
-// Inferno utils/cc/com64.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/com64.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-/*
- * this is machine dependent, but it is
- * common to all of the 64-bit simulating machines.
- */
-
-#define FNX 100 /* botch -- redefinition */
-
-Node* nodaddv;
-Node* nodsubv;
-Node* nodmulv;
-Node* noddivv;
-Node* noddivvu;
-Node* nodmodv;
-Node* nodmodvu;
-Node* nodlshv;
-Node* nodrshav;
-Node* nodrshlv;
-Node* nodandv;
-Node* nodorv;
-Node* nodxorv;
-Node* nodnegv;
-Node* nodcomv;
-
-Node* nodtestv;
-Node* nodeqv;
-Node* nodnev;
-Node* nodlev;
-Node* nodltv;
-Node* nodgev;
-Node* nodgtv;
-Node* nodhiv;
-Node* nodhsv;
-Node* nodlov;
-Node* nodlsv;
-
-Node* nodf2v;
-Node* nodd2v;
-Node* nodp2v;
-Node* nodsi2v;
-Node* nodui2v;
-Node* nodsl2v;
-Node* nodul2v;
-Node* nodsh2v;
-Node* noduh2v;
-Node* nodsc2v;
-Node* noduc2v;
-
-Node* nodv2f;
-Node* nodv2d;
-Node* nodv2ui;
-Node* nodv2si;
-Node* nodv2ul;
-Node* nodv2sl;
-Node* nodv2uh;
-Node* nodv2sh;
-Node* nodv2uc;
-Node* nodv2sc;
-
-Node* nodvpp;
-Node* nodppv;
-Node* nodvmm;
-Node* nodmmv;
-
-Node* nodvasop;
-
-char etconv[NALLTYPES]; /* for _vasop */
-Init initetconv[] =
-{
- TCHAR, 1, 0,
- TUCHAR, 2, 0,
- TSHORT, 3, 0,
- TUSHORT, 4, 0,
- TLONG, 5, 0,
- TULONG, 6, 0,
- TVLONG, 7, 0,
- TUVLONG, 8, 0,
- TINT, 9, 0,
- TUINT, 10, 0,
- -1, 0, 0,
-};
-
-Node*
-fvn(char *name, int type)
-{
- Node *n;
-
- n = new(ONAME, Z, Z);
- n->sym = slookup(name);
- n->sym->sig = SIGINTERN;
- if(fntypes[type] == 0)
- fntypes[type] = typ(TFUNC, types[type]);
- n->type = fntypes[type];
- n->etype = type;
- n->class = CGLOBL;
- n->addable = 10;
- n->complex = 0;
- return n;
-}
-
-void
-com64init(void)
-{
- Init *p;
-
- nodaddv = fvn("_addv", TVLONG);
- nodsubv = fvn("_subv", TVLONG);
- nodmulv = fvn("_mulv", TVLONG);
- noddivv = fvn("_divv", TVLONG);
- noddivvu = fvn("_divvu", TVLONG);
- nodmodv = fvn("_modv", TVLONG);
- nodmodvu = fvn("_modvu", TVLONG);
- nodlshv = fvn("_lshv", TVLONG);
- nodrshav = fvn("_rshav", TVLONG);
- nodrshlv = fvn("_rshlv", TVLONG);
- nodandv = fvn("_andv", TVLONG);
- nodorv = fvn("_orv", TVLONG);
- nodxorv = fvn("_xorv", TVLONG);
- nodnegv = fvn("_negv", TVLONG);
- nodcomv = fvn("_comv", TVLONG);
-
- nodtestv = fvn("_testv", TLONG);
- nodeqv = fvn("_eqv", TLONG);
- nodnev = fvn("_nev", TLONG);
- nodlev = fvn("_lev", TLONG);
- nodltv = fvn("_ltv", TLONG);
- nodgev = fvn("_gev", TLONG);
- nodgtv = fvn("_gtv", TLONG);
- nodhiv = fvn("_hiv", TLONG);
- nodhsv = fvn("_hsv", TLONG);
- nodlov = fvn("_lov", TLONG);
- nodlsv = fvn("_lsv", TLONG);
-
- nodf2v = fvn("_f2v", TVLONG);
- nodd2v = fvn("_d2v", TVLONG);
- nodp2v = fvn("_p2v", TVLONG);
- nodsi2v = fvn("_si2v", TVLONG);
- nodui2v = fvn("_ui2v", TVLONG);
- nodsl2v = fvn("_sl2v", TVLONG);
- nodul2v = fvn("_ul2v", TVLONG);
- nodsh2v = fvn("_sh2v", TVLONG);
- noduh2v = fvn("_uh2v", TVLONG);
- nodsc2v = fvn("_sc2v", TVLONG);
- noduc2v = fvn("_uc2v", TVLONG);
-
- nodv2f = fvn("_v2f", TFLOAT);
- nodv2d = fvn("_v2d", TDOUBLE);
- nodv2sl = fvn("_v2sl", TLONG);
- nodv2ul = fvn("_v2ul", TULONG);
- nodv2si = fvn("_v2si", TINT);
- nodv2ui = fvn("_v2ui", TUINT);
- nodv2sh = fvn("_v2sh", TSHORT);
- nodv2uh = fvn("_v2ul", TUSHORT);
- nodv2sc = fvn("_v2sc", TCHAR);
- nodv2uc = fvn("_v2uc", TUCHAR);
-
- nodvpp = fvn("_vpp", TVLONG);
- nodppv = fvn("_ppv", TVLONG);
- nodvmm = fvn("_vmm", TVLONG);
- nodmmv = fvn("_mmv", TVLONG);
-
- nodvasop = fvn("_vasop", TVLONG);
-
- for(p = initetconv; p->code >= 0; p++)
- etconv[p->code] = p->value;
-}
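
com64init() only binds names such as _addv to prebuilt function nodes; the interesting step is the rewrite that com64() then performs, replacing a 64-bit binary operator with a call to the matching helper on machines that cannot do the operation inline (the setbop label further down). A schematic of that one transformation on a toy node, where the struct layout and string tags are stand-ins for the real Node and op codes:

#include <stdio.h>
#include <string.h>

/* Toy node: only the fields the rewrite touches. */
typedef struct VNode VNode;
struct VNode {
	char   op[8];      /* "ADD", "FUNC", "LIST", "NAME", "CONST" */
	char   name[8];    /* helper name for "NAME" nodes */
	VNode *left, *right;
};

/* ADD(l, r) on a vlong  ==>  FUNC(_addv, LIST(l, r)), as setbop does */
static void
setbop(VNode *n, VNode *helper, VNode *list)
{
	list->left = n->left;
	list->right = n->right;
	strcpy(list->op, "LIST");
	n->left = helper;
	n->right = list;
	strcpy(n->op, "FUNC");
}

int
main(void)
{
	VNode a = {"CONST"}, b = {"CONST"}, list = {""};
	VNode addv = {"NAME", "_addv"};
	VNode n = {"ADD", "", &a, &b};

	setbop(&n, &addv, &list);
	printf("%s %s(%s, %s)\n", n.op, n.left->name,
		n.right->left->op, n.right->right->op);
	/* prints: FUNC _addv(CONST, CONST) */
	return 0;
}
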
-
-int
-com64(Node *n)
-{
- Node *l, *r, *a, *t;
- int lv, rv;
-
- if(n->type == 0)
- return 0;
-
- l = n->left;
- r = n->right;
-
- lv = 0;
- if(l && l->type && typev[l->type->etype])
- lv = 1;
- rv = 0;
- if(r && r->type && typev[r->type->etype])
- rv = 1;
-
- if(lv) {
- switch(n->op) {
- case OEQ:
- a = nodeqv;
- goto setbool;
- case ONE:
- a = nodnev;
- goto setbool;
- case OLE:
- a = nodlev;
- goto setbool;
- case OLT:
- a = nodltv;
- goto setbool;
- case OGE:
- a = nodgev;
- goto setbool;
- case OGT:
- a = nodgtv;
- goto setbool;
- case OHI:
- a = nodhiv;
- goto setbool;
- case OHS:
- a = nodhsv;
- goto setbool;
- case OLO:
- a = nodlov;
- goto setbool;
- case OLS:
- a = nodlsv;
- goto setbool;
-
- case OANDAND:
- case OOROR:
- if(machcap(n))
- return 1;
-
- if(rv) {
- r = new(OFUNC, nodtestv, r);
- n->right = r;
- r->complex = FNX;
- r->op = OFUNC;
- r->type = types[TLONG];
- }
-
- case OCOND:
- case ONOT:
- if(machcap(n))
- return 1;
-
- l = new(OFUNC, nodtestv, l);
- n->left = l;
- l->complex = FNX;
- l->op = OFUNC;
- l->type = types[TLONG];
- n->complex = FNX;
- return 1;
- }
- }
-
- if(rv) {
- if(machcap(n))
- return 1;
- switch(n->op) {
- case OANDAND:
- case OOROR:
- r = new(OFUNC, nodtestv, r);
- n->right = r;
- r->complex = FNX;
- r->op = OFUNC;
- r->type = types[TLONG];
- return 1;
- }
- }
-
- if(typev[n->type->etype]) {
- if(machcap(n))
- return 1;
- switch(n->op) {
- default:
- diag(n, "unknown vlong %O", n->op);
- case OFUNC:
- n->complex = FNX;
- case ORETURN:
- case OAS:
- case OIND:
- return 1;
- case OADD:
- a = nodaddv;
- goto setbop;
- case OSUB:
- a = nodsubv;
- goto setbop;
- case OMUL:
- case OLMUL:
- a = nodmulv;
- goto setbop;
- case ODIV:
- a = noddivv;
- goto setbop;
- case OLDIV:
- a = noddivvu;
- goto setbop;
- case OMOD:
- a = nodmodv;
- goto setbop;
- case OLMOD:
- a = nodmodvu;
- goto setbop;
- case OASHL:
- a = nodlshv;
- goto setbop;
- case OASHR:
- a = nodrshav;
- goto setbop;
- case OLSHR:
- a = nodrshlv;
- goto setbop;
- case OAND:
- a = nodandv;
- goto setbop;
- case OOR:
- a = nodorv;
- goto setbop;
- case OXOR:
- a = nodxorv;
- goto setbop;
- case OPOSTINC:
- a = nodvpp;
- goto setvinc;
- case OPOSTDEC:
- a = nodvmm;
- goto setvinc;
- case OPREINC:
- a = nodppv;
- goto setvinc;
- case OPREDEC:
- a = nodmmv;
- goto setvinc;
- case ONEG:
- a = nodnegv;
- goto setfnx;
- case OCOM:
- a = nodcomv;
- goto setfnx;
- case OCAST:
- switch(l->type->etype) {
- case TCHAR:
- a = nodsc2v;
- goto setfnxl;
- case TUCHAR:
- a = noduc2v;
- goto setfnxl;
- case TSHORT:
- a = nodsh2v;
- goto setfnxl;
- case TUSHORT:
- a = noduh2v;
- goto setfnxl;
- case TINT:
- a = nodsi2v;
- goto setfnx;
- case TUINT:
- a = nodui2v;
- goto setfnx;
- case TLONG:
- a = nodsl2v;
- goto setfnx;
- case TULONG:
- a = nodul2v;
- goto setfnx;
- case TFLOAT:
- a = nodf2v;
- goto setfnx;
- case TDOUBLE:
- a = nodd2v;
- goto setfnx;
- case TIND:
- a = nodp2v;
- goto setfnx;
- }
- diag(n, "unknown %T->vlong cast", l->type);
- return 1;
- case OASADD:
- a = nodaddv;
- goto setasop;
- case OASSUB:
- a = nodsubv;
- goto setasop;
- case OASMUL:
- case OASLMUL:
- a = nodmulv;
- goto setasop;
- case OASDIV:
- a = noddivv;
- goto setasop;
- case OASLDIV:
- a = noddivvu;
- goto setasop;
- case OASMOD:
- a = nodmodv;
- goto setasop;
- case OASLMOD:
- a = nodmodvu;
- goto setasop;
- case OASASHL:
- a = nodlshv;
- goto setasop;
- case OASASHR:
- a = nodrshav;
- goto setasop;
- case OASLSHR:
- a = nodrshlv;
- goto setasop;
- case OASAND:
- a = nodandv;
- goto setasop;
- case OASOR:
- a = nodorv;
- goto setasop;
- case OASXOR:
- a = nodxorv;
- goto setasop;
- }
- }
-
- if(typefd[n->type->etype] && l && l->op == OFUNC) {
- switch(n->op) {
- case OASADD:
- case OASSUB:
- case OASMUL:
- case OASLMUL:
- case OASDIV:
- case OASLDIV:
- case OASMOD:
- case OASLMOD:
- case OASASHL:
- case OASASHR:
- case OASLSHR:
- case OASAND:
- case OASOR:
- case OASXOR:
- if(l->right && typev[l->right->etype]) {
- diag(n, "sorry float <asop> vlong not implemented\n");
- }
- }
- }
-
- if(n->op == OCAST) {
- if(l->type && typev[l->type->etype]) {
- if(machcap(n))
- return 1;
- switch(n->type->etype) {
- case TDOUBLE:
- a = nodv2d;
- goto setfnx;
- case TFLOAT:
- a = nodv2f;
- goto setfnx;
- case TLONG:
- a = nodv2sl;
- goto setfnx;
- case TULONG:
- a = nodv2ul;
- goto setfnx;
- case TINT:
- a = nodv2si;
- goto setfnx;
- case TUINT:
- a = nodv2ui;
- goto setfnx;
- case TSHORT:
- a = nodv2sh;
- goto setfnx;
- case TUSHORT:
- a = nodv2uh;
- goto setfnx;
- case TCHAR:
- a = nodv2sc;
- goto setfnx;
- case TUCHAR:
- a = nodv2uc;
- goto setfnx;
- case TIND: // small pun here
- a = nodv2ul;
- goto setfnx;
- }
- diag(n, "unknown vlong->%T cast", n->type);
- return 1;
- }
- }
-
- return 0;
-
-setbop:
- n->left = a;
- n->right = new(OLIST, l, r);
- n->complex = FNX;
- n->op = OFUNC;
- return 1;
-
-setfnxl:
- l = new(OCAST, l, 0);
- l->type = types[TLONG];
- l->complex = l->left->complex;
-
-setfnx:
- n->left = a;
- n->right = l;
- n->complex = FNX;
- n->op = OFUNC;
- return 1;
-
-setvinc:
- n->left = a;
- l = new(OADDR, l, Z);
- l->type = typ(TIND, l->left->type);
- n->right = new(OLIST, l, r);
- n->complex = FNX;
- n->op = OFUNC;
- return 1;
-
-setbool:
- if(machcap(n))
- return 1;
- n->left = a;
- n->right = new(OLIST, l, r);
- n->complex = FNX;
- n->op = OFUNC;
- n->type = types[TLONG];
- return 1;
-
-setasop:
- if(l->op == OFUNC) {
- l = l->right;
- goto setasop;
- }
-
- t = new(OCONST, 0, 0);
- t->vconst = etconv[l->type->etype];
- t->type = types[TLONG];
- t->addable = 20;
- r = new(OLIST, t, r);
-
- t = new(OADDR, a, 0);
- t->type = typ(TIND, a->type);
- r = new(OLIST, t, r);
-
- t = new(OADDR, l, 0);
- t->type = typ(TIND, l->type);
- r = new(OLIST, t, r);
-
- n->left = nodvasop;
- n->right = r;
- n->complex = FNX;
- n->op = OFUNC;
-
- return 1;
-}
-
-void
-bool64(Node *n)
-{
- Node *n1;
-
- if(machcap(Z))
- return;
- if(typev[n->type->etype]) {
- n1 = new(OXXX, 0, 0);
- *n1 = *n;
-
- n->right = n1;
- n->left = nodtestv;
- n->complex = FNX;
- n->addable = 0;
- n->op = OFUNC;
- n->type = types[TLONG];
- }
-}
-
-/*
- * more machine dependent stuff.
- * this is common for 8,16,32,64 bit machines.
- * this is common for ieee machines.
- */
-double
-convvtof(vlong v)
-{
- double d;
-
- d = v; /* BOTCH */
- return d;
-}
-
-vlong
-convftov(double d)
-{
- vlong v;
-
-
- v = d; /* BOTCH */
- return v;
-}
-
-double
-convftox(double d, int et)
-{
-
- if(!typefd[et])
- diag(Z, "bad type in castftox %s", tnames[et]);
- return d;
-}
-
-vlong
-convvtox(vlong c, int et)
-{
- int n;
-
- n = 8 * ewidth[et];
- c &= MASK(n);
- if(!typeu[et])
- if(c & SIGN(n))
- c |= ~MASK(n);
- return c;
-}
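
A minimal sketch, not part of the deleted file, of the lowering that com64 arranges above: when machcap() reports no native 64-bit support, setbop turns an expression such as x + y on vlong operands into a call to the helper bound to nodaddv (_addv), passing the operands as an OLIST. The two-word Vl type, the helper body and main() below are invented for illustration; the real helpers live in the target's runtime support and use its own vlong layout.

#include <stdio.h>

typedef struct { unsigned int lo, hi; } Vl;	/* hypothetical split vlong */

/* what a runtime _addv-style helper boils down to */
static Vl _addv(Vl a, Vl b)
{
	Vl r;

	r.lo = a.lo + b.lo;
	r.hi = a.hi + b.hi + (r.lo < a.lo);	/* propagate the carry */
	return r;
}

int main(void)
{
	Vl x = { 0xffffffffu, 0 };
	Vl y = { 1, 0 };
	Vl z = _addv(x, y);	/* what the compiler emits for x + y */

	printf("%x %x\n", z.hi, z.lo);	/* prints: 1 0 */
	return 0;
}
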
diff --git a/src/cmd/cc/dcl.c b/src/cmd/cc/dcl.c
deleted file mode 100644
index 117508fd6..000000000
--- a/src/cmd/cc/dcl.c
+++ /dev/null
@@ -1,1707 +0,0 @@
-// Inferno utils/cc/dcl.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/dcl.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-#include "../ld/textflag.h"
-
-static int haspointers(Type*);
-
-Node*
-dodecl(void (*f)(int,Type*,Sym*), int c, Type *t, Node *n)
-{
- Sym *s;
- Node *n1;
- int32 v;
-
- nearln = lineno;
- lastfield = 0;
-
-loop:
- if(n != Z)
- switch(n->op) {
- default:
- diag(n, "unknown declarator: %O", n->op);
- break;
-
- case OARRAY:
- t = typ(TARRAY, t);
- t->width = 0;
- n1 = n->right;
- n = n->left;
- if(n1 != Z) {
- complex(n1);
- v = -1;
- if(n1->op == OCONST)
- v = n1->vconst;
- if(v <= 0) {
- diag(n, "array size must be a positive constant");
- v = 1;
- }
- t->width = v * t->link->width;
- }
- goto loop;
-
- case OIND:
- t = typ(TIND, t);
- t->garb = n->garb;
- n = n->left;
- goto loop;
-
- case OFUNC:
- t = typ(TFUNC, t);
- t->down = fnproto(n);
- n = n->left;
- goto loop;
-
- case OBIT:
- n1 = n->right;
- complex(n1);
- lastfield = -1;
- if(n1->op == OCONST)
- lastfield = n1->vconst;
- if(lastfield < 0) {
- diag(n, "field width must be non-negative constant");
- lastfield = 1;
- }
- if(lastfield == 0) {
- lastbit = 0;
- firstbit = 1;
- if(n->left != Z) {
- diag(n, "zero width named field");
- lastfield = 1;
- }
- }
- if(!typei[t->etype]) {
- diag(n, "field type must be int-like");
- t = types[TINT];
- lastfield = 1;
- }
- if(lastfield > tfield->width*8) {
- diag(n, "field width larger than field unit");
- lastfield = 1;
- }
- lastbit += lastfield;
- if(lastbit > tfield->width*8) {
- lastbit = lastfield;
- firstbit = 1;
- }
- n = n->left;
- goto loop;
-
- case ONAME:
- if(f == NODECL)
- break;
- s = n->sym;
- (*f)(c, t, s);
- if(s->class == CLOCAL)
- s = mkstatic(s);
- if(dataflag) {
- s->dataflag = dataflag;
- dataflag = 0;
- } else if(s->type != T && !haspointers(s->type))
- s->dataflag = NOPTR;
- firstbit = 0;
- n->sym = s;
- n->type = s->type;
- n->xoffset = s->offset;
- n->class = s->class;
- n->etype = TVOID;
- if(n->type != T)
- n->etype = n->type->etype;
- if(debug['d'])
- dbgdecl(s);
- acidvar(s);
- godefvar(s);
- s->varlineno = lineno;
- break;
- }
- lastdcl = t;
- return n;
-}
-
-Sym*
-mkstatic(Sym *s)
-{
- Sym *s1;
-
- if(s->class != CLOCAL)
- return s;
- snprint(symb, NSYMB, "%s$%d", s->name, s->block);
- s1 = lookup();
- if(s1->class != CSTATIC) {
- s1->type = s->type;
- s1->offset = s->offset;
- s1->block = s->block;
- s1->class = CSTATIC;
- }
- return s1;
-}
-
-/*
- * make a copy of a typedef
- * the problem is to split out incomplete
- * arrays so that it is in the variable
- * rather than the typedef.
- */
-Type*
-tcopy(Type *t)
-{
- Type *tl, *tx;
- int et;
-
- if(t == T)
- return t;
- et = t->etype;
- if(typesu[et])
- return t;
- tl = tcopy(t->link);
- if(tl != t->link ||
- (et == TARRAY && t->width == 0)) {
- tx = copytyp(t);
- tx->link = tl;
- return tx;
- }
- return t;
-}
-
-Node*
-doinit(Sym *s, Type *t, int32 o, Node *a)
-{
- Node *n;
-
- if(t == T)
- return Z;
- if(s->class == CEXTERN) {
- s->class = CGLOBL;
- if(debug['d'])
- dbgdecl(s);
- }
- if(debug['i']) {
- print("t = %T; o = %d; n = %s\n", t, o, s->name);
- prtree(a, "doinit value");
- }
-
-
- n = initlist;
- if(a->op == OINIT)
- a = a->left;
- initlist = a;
-
- a = init1(s, t, o, 0);
- if(initlist != Z)
- diag(initlist, "more initializers than structure: %s",
- s->name);
- initlist = n;
-
- return a;
-}
-
-/*
- * get next major operator,
- * dont advance initlist.
- */
-Node*
-peekinit(void)
-{
- Node *a;
-
- a = initlist;
-
-loop:
- if(a == Z)
- return a;
- if(a->op == OLIST) {
- a = a->left;
- goto loop;
- }
- return a;
-}
-
-/*
- * consume and return next element on
- * initlist. expand strings.
- */
-Node*
-nextinit(void)
-{
- Node *a, *b, *n;
-
- a = initlist;
- n = Z;
-
- if(a == Z)
- return a;
- if(a->op == OLIST) {
- n = a->right;
- a = a->left;
- }
- if(a->op == OUSED) {
- a = a->left;
- b = new(OCONST, Z, Z);
- b->type = a->type->link;
- if(a->op == OSTRING) {
- b->vconst = convvtox(*a->cstring, TCHAR);
- a->cstring++;
- }
- if(a->op == OLSTRING) {
- b->vconst = convvtox(*a->rstring, TRUNE);
- a->rstring++;
- }
- a->type->width -= b->type->width;
- if(a->type->width <= 0)
- initlist = n;
- return b;
- }
- initlist = n;
- return a;
-}
-
-int
-isstruct(Node *a, Type *t)
-{
- Node *n;
-
- switch(a->op) {
- case ODOTDOT:
- n = a->left;
- if(n && n->type && sametype(n->type, t))
- return 1;
- case OSTRING:
- case OLSTRING:
- case OCONST:
- case OINIT:
- case OELEM:
- return 0;
- }
-
- n = new(ODOTDOT, Z, Z);
- *n = *a;
-
- /*
- * ODOTDOT is a flag for tcom
- * a second tcom will not be performed
- */
- a->op = ODOTDOT;
- a->left = n;
- a->right = Z;
-
- if(tcom(n))
- return 0;
-
- if(sametype(n->type, t))
- return 1;
- return 0;
-}
-
-Node*
-init1(Sym *s, Type *t, int32 o, int exflag)
-{
- Node *a, *l, *r, nod;
- Type *t1;
- int32 e, w, so, mw;
-
- a = peekinit();
- if(a == Z)
- return Z;
-
- if(debug['i']) {
- print("t = %T; o = %d; n = %s\n", t, o, s->name);
- prtree(a, "init1 value");
- }
-
- if(exflag && a->op == OINIT)
- return doinit(s, t, o, nextinit());
-
- switch(t->etype) {
- default:
- diag(Z, "unknown type in initialization: %T to: %s", t, s->name);
- return Z;
-
- case TCHAR:
- case TUCHAR:
- case TINT:
- case TUINT:
- case TSHORT:
- case TUSHORT:
- case TLONG:
- case TULONG:
- case TVLONG:
- case TUVLONG:
- case TFLOAT:
- case TDOUBLE:
- case TIND:
- single:
- if(a->op == OARRAY || a->op == OELEM)
- return Z;
-
- a = nextinit();
- if(a == Z)
- return Z;
-
- if(t->nbits)
- diag(Z, "cannot initialize bitfields");
- if(s->class == CAUTO) {
- l = new(ONAME, Z, Z);
- l->sym = s;
- l->type = t;
- l->etype = TVOID;
- if(s->type)
- l->etype = s->type->etype;
- l->xoffset = s->offset + o;
- l->class = s->class;
-
- l = new(OASI, l, a);
- return l;
- }
-
- complex(a);
- if(a->type == T)
- return Z;
-
- if(a->op == OCONST) {
- if(vconst(a) && t->etype == TIND && a->type && a->type->etype != TIND){
- diag(a, "initialize pointer to an integer: %s", s->name);
- return Z;
- }
- if(!sametype(a->type, t)) {
- /* hoop jumping to save malloc */
- if(nodcast == Z)
- nodcast = new(OCAST, Z, Z);
- nod = *nodcast;
- nod.left = a;
- nod.type = t;
- nod.lineno = a->lineno;
- complex(&nod);
- if(nod.type)
- *a = nod;
- }
- if(a->op != OCONST) {
- diag(a, "initializer is not a constant: %s",
- s->name);
- return Z;
- }
- if(vconst(a) == 0)
- return Z;
- goto gext;
- }
- if(t->etype == TIND) {
- while(a->op == OCAST) {
- warn(a, "CAST in initialization ignored");
- a = a->left;
- }
- if(!sametype(t, a->type)) {
- diag(a, "initialization of incompatible pointers: %s\n%T and %T",
- s->name, t, a->type);
- }
- if(a->op == OADDR)
- a = a->left;
- goto gext;
- }
-
- while(a->op == OCAST)
- a = a->left;
- if(a->op == OADDR) {
- warn(a, "initialize pointer to an integer: %s", s->name);
- a = a->left;
- goto gext;
- }
- diag(a, "initializer is not a constant: %s", s->name);
- return Z;
-
- gext:
- gextern(s, a, o, t->width);
-
- return Z;
-
- case TARRAY:
- w = t->link->width;
- if(a->op == OSTRING || a->op == OLSTRING)
- if(typei[t->link->etype]) {
- /*
- * get rid of null if sizes match exactly
- */
- a = nextinit();
- mw = t->width/w;
- so = a->type->width/a->type->link->width;
- if(mw && so > mw) {
- if(so != mw+1)
- diag(a, "string initialization larger than array");
- a->type->width -= a->type->link->width;
- }
-
- /*
- * arrange strings to be expanded
- * inside OINIT braces.
- */
- a = new(OUSED, a, Z);
- return doinit(s, t, o, a);
- }
-
- mw = -w;
- l = Z;
- for(e=0;;) {
- /*
- * peek ahead for element initializer
- */
- a = peekinit();
- if(a == Z)
- break;
- if(a->op == OELEM && t->link->etype != TSTRUCT)
- break;
- if(a->op == OARRAY) {
- if(e && exflag)
- break;
- a = nextinit();
- r = a->left;
- complex(r);
- if(r->op != OCONST) {
- diag(r, "initializer subscript must be constant");
- return Z;
- }
- e = r->vconst;
- if(t->width != 0)
- if(e < 0 || e*w >= t->width) {
- diag(a, "initialization index out of range: %d", e);
- continue;
- }
- }
-
- so = e*w;
- if(so > mw)
- mw = so;
- if(t->width != 0)
- if(mw >= t->width)
- break;
- r = init1(s, t->link, o+so, 1);
- l = newlist(l, r);
- e++;
- }
- if(t->width == 0)
- t->width = mw+w;
- return l;
-
- case TUNION:
- case TSTRUCT:
- /*
- * peek ahead to find type of rhs.
- * if its a structure, then treat
- * this element as a variable
- * rather than an aggregate.
- */
- if(isstruct(a, t))
- goto single;
-
- if(t->width <= 0) {
- diag(Z, "incomplete structure: %s", s->name);
- return Z;
- }
- l = Z;
-
- again:
- for(t1 = t->link; t1 != T; t1 = t1->down) {
- if(a->op == OARRAY && t1->etype != TARRAY)
- break;
- if(a->op == OELEM) {
- if(t1->sym != a->sym)
- continue;
- nextinit();
- }
- r = init1(s, t1, o+t1->offset, 1);
- l = newlist(l, r);
- a = peekinit();
- if(a == Z)
- break;
- if(a->op == OELEM)
- goto again;
- }
- if(a && a->op == OELEM)
- diag(a, "structure element not found %F", a);
- return l;
- }
-}
-
-Node*
-newlist(Node *l, Node *r)
-{
- if(r == Z)
- return l;
- if(l == Z)
- return r;
- return new(OLIST, l, r);
-}
-
-static int
-haspointers(Type *t)
-{
- Type *fld;
-
- switch(t->etype) {
- case TSTRUCT:
- for(fld = t->link; fld != T; fld = fld->down) {
- if(haspointers(fld))
- return 1;
- }
- return 0;
- case TARRAY:
- return haspointers(t->link);
- case TIND:
- return t->link->etype != TFUNC;
- default:
- return 0;
- }
-}
-
-void
-sualign(Type *t)
-{
- Type *l;
- int32 o, w, maxal;
-
- o = 0;
- maxal = 0;
- switch(t->etype) {
-
- case TSTRUCT:
- t->offset = 0;
- w = 0;
- for(l = t->link; l != T; l = l->down) {
- if(l->nbits) {
- if(l->shift <= 0) {
- l->shift = -l->shift;
- w = xround(w, tfield->width);
- o = w;
- w += tfield->width;
- }
- l->offset = o;
- } else {
- if(l->width <= 0)
- if(l->down != T)
- if(l->sym)
- diag(Z, "incomplete structure element: %s",
- l->sym->name);
- else
- diag(Z, "incomplete structure element");
- w = align(w, l, Ael1, &maxal);
- l->offset = w;
- w = align(w, l, Ael2, &maxal);
- }
- }
- w = align(w, t, Asu2, &maxal);
- t->width = w;
- t->align = maxal;
- acidtype(t);
- godeftype(t);
- return;
-
- case TUNION:
- t->offset = 0;
- w = 0;
- for(l = t->link; l != T; l = l->down) {
- if(l->width <= 0)
- if(l->sym)
- diag(Z, "incomplete union element: %s",
- l->sym->name);
- else
- diag(Z, "incomplete union element");
- l->offset = 0;
- l->shift = 0;
- if((debug['q'] || debug['Q']) && haspointers(l))
- diag(Z, "precise garbage collector cannot handle unions with pointers");
-
- o = align(align(0, l, Ael1, &maxal), l, Ael2, &maxal);
- if(o > w)
- w = o;
- }
- w = align(w, t, Asu2, &maxal);
- t->width = w;
- t->align = maxal;
- acidtype(t);
- godeftype(t);
- return;
-
- default:
- diag(Z, "unknown type in sualign: %T", t);
- break;
- }
-}
-
-int32
-xround(int32 v, int w)
-{
- int r;
-
- if(w <= 0 || w > 8) {
- diag(Z, "rounding by %d", w);
- w = 1;
- }
- r = v%w;
- if(r)
- v += w-r;
- return v;
-}
-
-Type*
-ofnproto(Node *n)
-{
- Type *tl, *tr, *t;
-
- if(n == Z)
- return T;
- switch(n->op) {
- case OLIST:
- tl = ofnproto(n->left);
- tr = ofnproto(n->right);
- if(tl == T)
- return tr;
- tl->down = tr;
- return tl;
-
- case ONAME:
- t = copytyp(n->sym->type);
- t->down = T;
- return t;
- }
- return T;
-}
-
-#define ANSIPROTO 1
-#define OLDPROTO 2
-
-void
-argmark(Node *n, int pass)
-{
- Type *t;
-
- if(hasdotdotdot(thisfn->link))
- autoffset = align(0, thisfn->link, Aarg0, nil);
- stkoff = 0;
- for(; n->left != Z; n = n->left) {
- if(n->op != OFUNC || n->left->op != ONAME)
- continue;
- walkparam(n->right, pass);
- if(pass != 0 && anyproto(n->right) == OLDPROTO) {
- t = typ(TFUNC, n->left->sym->type->link);
- t->down = typ(TOLD, T);
- t->down->down = ofnproto(n->right);
- tmerge(t, n->left->sym);
- n->left->sym->type = t;
- }
- break;
- }
- autoffset = 0;
- stkoff = 0;
-}
-
-void
-walkparam(Node *n, int pass)
-{
- Sym *s;
- Node *n1;
-
- if(n != Z && n->op == OPROTO && n->left == Z && n->type == types[TVOID])
- return;
-
-loop:
- if(n == Z)
- return;
- switch(n->op) {
- default:
- diag(n, "argument not a name/prototype: %O", n->op);
- break;
-
- case OLIST:
- walkparam(n->left, pass);
- n = n->right;
- goto loop;
-
- case OPROTO:
- for(n1 = n; n1 != Z; n1=n1->left)
- if(n1->op == ONAME) {
- if(pass == 0) {
- s = n1->sym;
- push1(s);
- s->offset = -1;
- break;
- }
- dodecl(pdecl, CPARAM, n->type, n->left);
- break;
- }
- if(n1)
- break;
- if(pass == 0) {
- /*
- * extension:
- * allow no name in argument declaration
- diag(Z, "no name in argument declaration");
- */
- break;
- }
- dodecl(NODECL, CPARAM, n->type, n->left);
- pdecl(CPARAM, lastdcl, S);
- break;
-
- case ODOTDOT:
- break;
-
- case ONAME:
- s = n->sym;
- if(pass == 0) {
- push1(s);
- s->offset = -1;
- break;
- }
- if(s->offset != -1) {
- if(autoffset == 0) {
- firstarg = s;
- firstargtype = s->type;
- }
- autoffset = align(autoffset, s->type, Aarg1, nil);
- s->offset = autoffset;
- autoffset = align(autoffset, s->type, Aarg2, nil);
- } else
- dodecl(pdecl, CXXX, types[TINT], n);
- break;
- }
-}
-
-void
-markdcl(void)
-{
- Decl *d;
-
- blockno++;
- d = push();
- d->val = DMARK;
- d->offset = autoffset;
- d->block = autobn;
- autobn = blockno;
-}
-
-Node*
-revertdcl(void)
-{
- Decl *d;
- Sym *s;
- Node *n, *n1;
-
- n = Z;
- for(;;) {
- d = dclstack;
- if(d == D) {
- diag(Z, "pop off dcl stack");
- break;
- }
- dclstack = d->link;
- s = d->sym;
- switch(d->val) {
- case DMARK:
- autoffset = d->offset;
- autobn = d->block;
- return n;
-
- case DAUTO:
- if(debug['d'])
- print("revert1 \"%s\"\n", s->name);
- if(s->aused == 0) {
- nearln = s->varlineno;
- if(s->class == CAUTO)
- warn(Z, "auto declared and not used: %s", s->name);
- if(s->class == CPARAM)
- warn(Z, "param declared and not used: %s", s->name);
- }
- if(s->type && (s->type->garb & GVOLATILE)) {
- n1 = new(ONAME, Z, Z);
- n1->sym = s;
- n1->type = s->type;
- n1->etype = TVOID;
- if(n1->type != T)
- n1->etype = n1->type->etype;
- n1->xoffset = s->offset;
- n1->class = s->class;
-
- n1 = new(OADDR, n1, Z);
- n1 = new(OUSED, n1, Z);
- if(n == Z)
- n = n1;
- else
- n = new(OLIST, n1, n);
- }
- s->type = d->type;
- s->class = d->class;
- s->offset = d->offset;
- s->block = d->block;
- s->varlineno = d->varlineno;
- s->aused = d->aused;
- break;
-
- case DSUE:
- if(debug['d'])
- print("revert2 \"%s\"\n", s->name);
- s->suetag = d->type;
- s->sueblock = d->block;
- break;
-
- case DLABEL:
- if(debug['d'])
- print("revert3 \"%s\"\n", s->name);
- if(s->label && s->label->addable == 0)
- warn(s->label, "label declared and not used \"%s\"", s->name);
- s->label = Z;
- break;
- }
- }
- return n;
-}
-
-Type*
-fnproto(Node *n)
-{
- int r;
-
- r = anyproto(n->right);
- if(r == 0 || (r & OLDPROTO)) {
- if(r & ANSIPROTO)
- diag(n, "mixed ansi/old function declaration: %F", n->left);
- return T;
- }
- return fnproto1(n->right);
-}
-
-int
-anyproto(Node *n)
-{
- int r;
-
- r = 0;
-
-loop:
- if(n == Z)
- return r;
- switch(n->op) {
- case OLIST:
- r |= anyproto(n->left);
- n = n->right;
- goto loop;
-
- case ODOTDOT:
- case OPROTO:
- return r | ANSIPROTO;
- }
- return r | OLDPROTO;
-}
-
-Type*
-fnproto1(Node *n)
-{
- Type *t;
-
- if(n == Z)
- return T;
- switch(n->op) {
- case OLIST:
- t = fnproto1(n->left);
- if(t != T)
- t->down = fnproto1(n->right);
- return t;
-
- case OPROTO:
- lastdcl = T;
- dodecl(NODECL, CXXX, n->type, n->left);
- t = typ(TXXX, T);
- if(lastdcl != T)
- *t = *paramconv(lastdcl, 1);
- return t;
-
- case ONAME:
- diag(n, "incomplete argument prototype");
- return typ(TINT, T);
-
- case ODOTDOT:
- return typ(TDOT, T);
- }
- diag(n, "unknown op in fnproto");
- return T;
-}
-
-void
-dbgdecl(Sym *s)
-{
- print("decl \"%s\": C=%s [B=%d:O=%d] T=%T\n",
- s->name, cnames[s->class], s->block, s->offset, s->type);
-}
-
-Decl*
-push(void)
-{
- Decl *d;
-
- d = alloc(sizeof(*d));
- d->link = dclstack;
- dclstack = d;
- return d;
-}
-
-Decl*
-push1(Sym *s)
-{
- Decl *d;
-
- d = push();
- d->sym = s;
- d->val = DAUTO;
- d->type = s->type;
- d->class = s->class;
- d->offset = s->offset;
- d->block = s->block;
- d->varlineno = s->varlineno;
- d->aused = s->aused;
- return d;
-}
-
-int
-sametype(Type *t1, Type *t2)
-{
-
- if(t1 == t2)
- return 1;
- return rsametype(t1, t2, 5, 1);
-}
-
-int
-rsametype(Type *t1, Type *t2, int n, int f)
-{
- int et;
-
- n--;
- for(;;) {
- if(t1 == t2)
- return 1;
- if(t1 == T || t2 == T)
- return 0;
- if(n <= 0)
- return 1;
- et = t1->etype;
- if(et != t2->etype)
- return 0;
- if(et == TFUNC) {
- if(!rsametype(t1->link, t2->link, n, 0))
- return 0;
- t1 = t1->down;
- t2 = t2->down;
- while(t1 != T && t2 != T) {
- if(t1->etype == TOLD) {
- t1 = t1->down;
- continue;
- }
- if(t2->etype == TOLD) {
- t2 = t2->down;
- continue;
- }
- while(t1 != T || t2 != T) {
- if(!rsametype(t1, t2, n, 0))
- return 0;
- t1 = t1->down;
- t2 = t2->down;
- }
- break;
- }
- return 1;
- }
- if(et == TARRAY)
- if(t1->width != t2->width && t1->width != 0 && t2->width != 0)
- return 0;
- if(typesu[et]) {
- if(t1->link == T)
- snap(t1);
- if(t2->link == T)
- snap(t2);
- t1 = t1->link;
- t2 = t2->link;
- for(;;) {
- if(t1 == t2)
- return 1;
- if(!rsametype(t1, t2, n, 0))
- return 0;
- t1 = t1->down;
- t2 = t2->down;
- }
- }
- t1 = t1->link;
- t2 = t2->link;
- if((f || !debug['V']) && et == TIND) {
- if(t1 != T && t1->etype == TVOID)
- return 1;
- if(t2 != T && t2->etype == TVOID)
- return 1;
- }
- }
-}
-
-typedef struct Typetab Typetab;
-
-struct Typetab{
- int n;
- Type **a;
-};
-
-static int
-sigind(Type *t, Typetab *tt)
-{
- int n;
- Type **a, **na, **p, **e;
-
- n = tt->n;
- a = tt->a;
- e = a+n;
- /* linear search seems ok */
- for(p = a ; p < e; p++)
- if(sametype(*p, t))
- return p-a;
- if((n&15) == 0){
- na = malloc((n+16)*sizeof(Type*));
- if(na == nil) {
- print("%s: out of memory", argv0);
- errorexit();
- }
- memmove(na, a, n*sizeof(Type*));
- free(a);
- a = tt->a = na;
- }
- a[tt->n++] = t;
- return -1;
-}
-
-static uint32
-signat(Type *t, Typetab *tt)
-{
- int i;
- Type *t1;
- int32 s;
-
- s = 0;
- for(; t; t=t->link) {
- s = s*thash1 + thash[t->etype];
- if(t->garb&GINCOMPLETE)
- return s;
- switch(t->etype) {
- default:
- return s;
- case TARRAY:
- s = s*thash2 + 0; /* was t->width */
- break;
- case TFUNC:
- for(t1=t->down; t1; t1=t1->down)
- s = s*thash3 + signat(t1, tt);
- break;
- case TSTRUCT:
- case TUNION:
- if((i = sigind(t, tt)) >= 0){
- s = s*thash2 + i;
- return s;
- }
- for(t1=t->link; t1; t1=t1->down)
- s = s*thash3 + signat(t1, tt);
- return s;
- case TIND:
- break;
- }
- }
- return s;
-}
-
-uint32
-signature(Type *t)
-{
- uint32 s;
- Typetab tt;
-
- tt.n = 0;
- tt.a = nil;
- s = signat(t, &tt);
- free(tt.a);
- return s;
-}
-
-uint32
-sign(Sym *s)
-{
- uint32 v;
- Type *t;
-
- if(s->sig == SIGINTERN)
- return SIGNINTERN;
- if((t = s->type) == T)
- return 0;
- v = signature(t);
- if(v == 0)
- v = SIGNINTERN;
- return v;
-}
-
-void
-snap(Type *t)
-{
- if(typesu[t->etype])
- if(t->link == T && t->tag && t->tag->suetag) {
- t->link = t->tag->suetag->link;
- t->width = t->tag->suetag->width;
- }
-}
-
-Type*
-dotag(Sym *s, int et, int bn)
-{
- Decl *d;
-
- if(bn != 0 && bn != s->sueblock) {
- d = push();
- d->sym = s;
- d->val = DSUE;
- d->type = s->suetag;
- d->block = s->sueblock;
- s->suetag = T;
- }
- if(s->suetag == T) {
- s->suetag = typ(et, T);
- s->sueblock = autobn;
- }
- if(s->suetag->etype != et)
- diag(Z, "tag used for more than one type: %s",
- s->name);
- if(s->suetag->tag == S)
- s->suetag->tag = s;
- return s->suetag;
-}
-
-Node*
-dcllabel(Sym *s, int f)
-{
- Decl *d, d1;
- Node *n;
-
- n = s->label;
- if(n != Z) {
- if(f) {
- if(n->complex)
- diag(Z, "label reused: %s", s->name);
- n->complex = 1; // declared
- } else
- n->addable = 1; // used
- return n;
- }
-
- d = push();
- d->sym = s;
- d->val = DLABEL;
- dclstack = d->link;
-
- d1 = *firstdcl;
- *firstdcl = *d;
- *d = d1;
-
- firstdcl->link = d;
- firstdcl = d;
-
- n = new(OXXX, Z, Z);
- n->sym = s;
- n->complex = f;
- n->addable = !f;
- s->label = n;
-
- if(debug['d'])
- dbgdecl(s);
- return n;
-}
-
-Type*
-paramconv(Type *t, int f)
-{
-
- switch(t->etype) {
- case TUNION:
- case TSTRUCT:
- if(t->width <= 0)
- diag(Z, "incomplete structure: %s", t->tag->name);
- break;
-
- case TARRAY:
- t = typ(TIND, t->link);
- t->width = types[TIND]->width;
- break;
-
- case TFUNC:
- t = typ(TIND, t);
- t->width = types[TIND]->width;
- break;
-
- case TFLOAT:
- if(!f)
- t = types[TDOUBLE];
- break;
-
- case TCHAR:
- case TSHORT:
- if(!f)
- t = types[TINT];
- break;
-
- case TUCHAR:
- case TUSHORT:
- if(!f)
- t = types[TUINT];
- break;
- }
- return t;
-}
-
-void
-adecl(int c, Type *t, Sym *s)
-{
-
- if(c == CSTATIC)
- c = CLOCAL;
- if(t->etype == TFUNC) {
- if(c == CXXX)
- c = CEXTERN;
- if(c == CLOCAL)
- c = CSTATIC;
- if(c == CAUTO || c == CEXREG)
- diag(Z, "function cannot be %s %s", cnames[c], s->name);
- }
- if(c == CXXX)
- c = CAUTO;
- if(s) {
- if(s->class == CSTATIC)
- if(c == CEXTERN || c == CGLOBL) {
- warn(Z, "just say static: %s", s->name);
- c = CSTATIC;
- }
- if(s->class == CAUTO || s->class == CPARAM || s->class == CLOCAL)
- if(s->block == autobn)
- diag(Z, "auto redeclaration of: %s", s->name);
- if(c != CPARAM)
- push1(s);
- s->block = autobn;
- s->offset = 0;
- s->type = t;
- s->class = c;
- s->aused = 0;
- }
- switch(c) {
- case CAUTO:
- autoffset = align(autoffset, t, Aaut3, nil);
- stkoff = maxround(stkoff, autoffset);
- s->offset = -autoffset;
- break;
-
- case CPARAM:
- if(autoffset == 0) {
- firstarg = s;
- firstargtype = t;
- }
- autoffset = align(autoffset, t, Aarg1, nil);
- if(s)
- s->offset = autoffset;
- autoffset = align(autoffset, t, Aarg2, nil);
- break;
- }
-}
-
-void
-pdecl(int c, Type *t, Sym *s)
-{
- if(s && s->offset != -1) {
- diag(Z, "not a parameter: %s", s->name);
- return;
- }
- t = paramconv(t, c==CPARAM);
- if(c == CXXX)
- c = CPARAM;
- if(c != CPARAM) {
- diag(Z, "parameter cannot have class: %s", s->name);
- c = CPARAM;
- }
- adecl(c, t, s);
-}
-
-void
-xdecl(int c, Type *t, Sym *s)
-{
- int32 o;
-
- o = 0;
- switch(c) {
- case CEXREG:
- o = exreg(t);
- if(o == 0)
- c = CEXTERN;
- if(s->class == CGLOBL)
- c = CGLOBL;
- break;
-
- case CEXTERN:
- if(s->class == CGLOBL)
- c = CGLOBL;
- break;
-
- case CXXX:
- c = CGLOBL;
- if(s->class == CEXTERN)
- s->class = CGLOBL;
- break;
-
- case CAUTO:
- diag(Z, "overspecified class: %s %s %s", s->name, cnames[c], cnames[s->class]);
- c = CEXTERN;
- break;
-
- case CTYPESTR:
- if(!typesuv[t->etype]) {
- diag(Z, "typestr must be struct/union: %s", s->name);
- break;
- }
- dclfunct(t, s);
- break;
- }
-
- if(s->class == CSTATIC)
- if(c == CEXTERN || c == CGLOBL) {
- warn(Z, "overspecified class: %s %s %s", s->name, cnames[c], cnames[s->class]);
- c = CSTATIC;
- }
- if(s->type != T)
- if(s->class != c || !sametype(t, s->type) || t->etype == TENUM) {
- diag(Z, "external redeclaration of: %s", s->name);
- Bprint(&diagbuf, " %s %T %L\n", cnames[c], t, nearln);
- Bprint(&diagbuf, " %s %T %L\n", cnames[s->class], s->type, s->varlineno);
- }
- tmerge(t, s);
- s->type = t;
- if(c == CTYPEDEF && (typechlv[t->etype] || typefd[t->etype])) {
- s->type = copytyp(t);
- s->type->tag = s;
- }
- s->class = c;
- s->block = 0;
- s->offset = o;
-}
-
-void
-tmerge(Type *t1, Sym *s)
-{
- Type *ta, *tb, *t2;
-
- t2 = s->type;
- for(;;) {
- if(t1 == T || t2 == T || t1 == t2)
- break;
- if(t1->etype != t2->etype)
- break;
- switch(t1->etype) {
- case TFUNC:
- ta = t1->down;
- tb = t2->down;
- if(ta == T) {
- t1->down = tb;
- break;
- }
- if(tb == T)
- break;
- while(ta != T && tb != T) {
- if(ta == tb)
- break;
- /* ignore old-style flag */
- if(ta->etype == TOLD) {
- ta = ta->down;
- continue;
- }
- if(tb->etype == TOLD) {
- tb = tb->down;
- continue;
- }
- /* checking terminated by ... */
- if(ta->etype == TDOT && tb->etype == TDOT) {
- ta = T;
- tb = T;
- break;
- }
- if(!sametype(ta, tb))
- break;
- ta = ta->down;
- tb = tb->down;
- }
- if(ta != tb)
- diag(Z, "function inconsistently declared: %s", s->name);
-
- /* take new-style over old-style */
- ta = t1->down;
- tb = t2->down;
- if(ta != T && ta->etype == TOLD)
- if(tb != T && tb->etype != TOLD)
- t1->down = tb;
- break;
-
- case TARRAY:
- /* should we check array size change? */
- if(t2->width > t1->width)
- t1->width = t2->width;
- break;
-
- case TUNION:
- case TSTRUCT:
- return;
- }
- t1 = t1->link;
- t2 = t2->link;
- }
-}
-
-void
-edecl(int c, Type *t, Sym *s)
-{
- Type *t1;
-
- if(s == S)
- diag(Z, "unnamed structure elements not supported");
- else
- if(c != CXXX)
- diag(Z, "structure element cannot have class: %s", s->name);
- t1 = t;
- t = copytyp(t1);
- t->sym = s;
- t->down = T;
- if(lastfield) {
- t->shift = lastbit - lastfield;
- t->nbits = lastfield;
- if(firstbit)
- t->shift = -t->shift;
- if(typeu[t->etype])
- t->etype = tufield->etype;
- else
- t->etype = tfield->etype;
- }
- if(strf == T)
- strf = t;
- else
- strl->down = t;
- strl = t;
-}
-
-/*
- * this routine is very suspect.
- * ansi requires the enum type to
- * be represented as an 'int'
- * this means that 0x81234567
- * would be illegal. this routine
- * makes signed and unsigned go
- * to unsigned.
- */
-Type*
-maxtype(Type *t1, Type *t2)
-{
-
- if(t1 == T)
- return t2;
- if(t2 == T)
- return t1;
- if(t1->etype > t2->etype)
- return t1;
- return t2;
-}
-
-void
-doenum(Sym *s, Node *n)
-{
-
- if(n) {
- complex(n);
- if(n->op != OCONST) {
- diag(n, "enum not a constant: %s", s->name);
- return;
- }
- en.cenum = n->type;
- en.tenum = maxtype(en.cenum, en.tenum);
-
- if(!typefd[en.cenum->etype])
- en.lastenum = n->vconst;
- else
- en.floatenum = n->fconst;
- }
- if(dclstack)
- push1(s);
- xdecl(CXXX, types[TENUM], s);
-
- if(en.cenum == T) {
- en.tenum = types[TINT];
- en.cenum = types[TINT];
- en.lastenum = 0;
- }
- s->tenum = en.cenum;
-
- if(!typefd[s->tenum->etype]) {
- s->vconst = convvtox(en.lastenum, s->tenum->etype);
- en.lastenum++;
- } else {
- s->fconst = en.floatenum;
- en.floatenum++;
- }
-
- if(debug['d'])
- dbgdecl(s);
- acidvar(s);
- godefvar(s);
-}
-
-void
-symadjust(Sym *s, Node *n, int32 del)
-{
-
- switch(n->op) {
- default:
- if(n->left)
- symadjust(s, n->left, del);
- if(n->right)
- symadjust(s, n->right, del);
- return;
-
- case ONAME:
- if(n->sym == s)
- n->xoffset -= del;
- return;
-
- case OCONST:
- case OSTRING:
- case OLSTRING:
- case OINDREG:
- case OREGISTER:
- return;
- }
-}
-
-Node*
-contig(Sym *s, Node *n, int32 v)
-{
- Node *p, *r, *q, *m;
- int32 w;
- Type *zt;
-
- if(debug['i']) {
- print("contig v = %d; s = %s\n", v, s->name);
- prtree(n, "doinit value");
- }
-
- if(n == Z)
- goto no;
- w = s->type->width;
-
- /*
- * nightmare: an automatic array whose size
- * increases when it is initialized
- */
- if(v != w) {
- if(v != 0)
- diag(n, "automatic adjustable array: %s", s->name);
- v = s->offset;
- autoffset = align(autoffset, s->type, Aaut3, nil);
- s->offset = -autoffset;
- stkoff = maxround(stkoff, autoffset);
- symadjust(s, n, v - s->offset);
- }
- if(w <= ewidth[TIND])
- goto no;
- if(n->op == OAS)
- diag(Z, "oops in contig");
-/*ZZZ this appears incorrect
-need to check if the list completely covers the data.
-if not, bail
- */
- if(n->op == OLIST)
- goto no;
- if(n->op == OASI)
- if(n->left->type)
- if(n->left->type->width == w)
- goto no;
- while(w & (ewidth[TIND]-1))
- w++;
-/*
- * insert the following code, where long becomes vlong if pointers are fat
- *
- *(long**)&X = (long*)((char*)X + sizeof(X));
- do {
- *(long**)&X -= 1;
- **(long**)&X = 0;
- } while(*(long**)&X);
- */
-
- for(q=n; q->op != ONAME; q=q->left)
- ;
-
- zt = ewidth[TIND] > ewidth[TLONG]? types[TVLONG]: types[TLONG];
-
- p = new(ONAME, Z, Z);
- *p = *q;
- p->type = typ(TIND, zt);
- p->xoffset = s->offset;
-
- r = new(ONAME, Z, Z);
- *r = *p;
- r = new(OPOSTDEC, r, Z);
-
- q = new(ONAME, Z, Z);
- *q = *p;
- q = new(OIND, q, Z);
-
- m = new(OCONST, Z, Z);
- m->vconst = 0;
- m->type = zt;
-
- q = new(OAS, q, m);
-
- r = new(OLIST, r, q);
-
- q = new(ONAME, Z, Z);
- *q = *p;
- r = new(ODWHILE, q, r);
-
- q = new(ONAME, Z, Z);
- *q = *p;
- q->type = q->type->link;
- q->xoffset += w;
- q = new(OADDR, q, 0);
-
- q = new(OASI, p, q);
- r = new(OLIST, q, r);
-
- n = new(OLIST, r, n);
-
-no:
- return n;
-}
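
For reference, the zeroing loop described in the comment inside contig() above, written out as an ordinary C program; this is an illustrative sketch, not part of the deleted file. The emitted code reuses the first word of X itself as the cursor and terminates when zeroing that word turns it into a null pointer; the version below keeps the cursor in a separate variable p with an explicit bound, which is equivalent but easier to follow.

#include <stdio.h>

int main(void)
{
	long X[8];	/* stand-in for an auto aggregate wider than a pointer */
	long *p;
	int i;

	for(i = 0; i < 8; i++)
		X[i] = 12345;	/* garbage that initialization must clear */

	/* sweep from just past the end of X back to its start, zeroing */
	p = (long*)((char*)X + sizeof(X));
	do {
		p -= 1;
		*p = 0;
	} while(p != X);

	for(i = 0; i < 8; i++)
		printf("%ld ", X[i]);	/* prints eight zeros */
	printf("\n");
	return 0;
}
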
diff --git a/src/cmd/cc/doc.go b/src/cmd/cc/doc.go
deleted file mode 100644
index 10901b441..000000000
--- a/src/cmd/cc/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-/*
-
-This directory contains the portable section of the Plan 9 C compilers.
-See ../6c, ../8c, and ../5c for more information.
-
-*/
-package main
diff --git a/src/cmd/cc/dpchk.c b/src/cmd/cc/dpchk.c
deleted file mode 100644
index 606bf40dd..000000000
--- a/src/cmd/cc/dpchk.c
+++ /dev/null
@@ -1,793 +0,0 @@
-// Inferno utils/cc/dpchk.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/dpchk.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-#include "y.tab.h"
-
-enum
-{
- Fnone = 0,
- Fl,
- Fvl,
- Fignor,
- Fstar,
- Fadj,
-
- Fverb = 10,
-};
-
-typedef struct Tprot Tprot;
-struct Tprot
-{
- Type* type;
- Bits flag;
- Tprot* link;
-};
-
-typedef struct Tname Tname;
-struct Tname
-{
- char* name;
- int param;
- int count;
- Tname* link;
- Tprot* prot;
-};
-
-static Type* indchar;
-static uchar flagbits[512];
-static char* lastfmt;
-static int lastadj;
-static int lastverb;
-static int nstar;
-static Tprot* tprot;
-static Tname* tname;
-
-void
-argflag(int c, int v)
-{
-
- switch(v) {
- case Fignor:
- case Fstar:
- case Fl:
- case Fvl:
- flagbits[c] = v;
- break;
- case Fverb:
- flagbits[c] = lastverb;
-/*print("flag-v %c %d\n", c, lastadj);*/
- lastverb++;
- break;
- case Fadj:
- flagbits[c] = lastadj;
-/*print("flag-l %c %d\n", c, lastadj);*/
- lastadj++;
- break;
- }
-}
-
-Bits
-getflag(char *s)
-{
- Bits flag;
- int f;
- Fmt fmt;
- Rune c;
-
- flag = zbits;
- nstar = 0;
- fmtstrinit(&fmt);
- for(;;) {
- s += chartorune(&c, s);
- if(c == 0 || c >= nelem(flagbits))
- break;
- fmtrune(&fmt, c);
- f = flagbits[c];
- switch(f) {
- case Fnone:
- argflag(c, Fverb);
- f = flagbits[c];
- break;
- case Fstar:
- nstar++;
- case Fignor:
- continue;
- case Fl:
- if(bset(flag, Fl))
- flag = bor(flag, blsh(Fvl));
- }
- flag = bor(flag, blsh(f));
- if(f >= Fverb)
- break;
- }
- free(lastfmt);
- lastfmt = fmtstrflush(&fmt);
- return flag;
-}
-
-static void
-newprot(Sym *m, Type *t, char *s, Tprot **prot)
-{
- Bits flag;
- Tprot *l;
-
- if(t == T) {
- warn(Z, "%s: newprot: type not defined", m->name);
- return;
- }
- flag = getflag(s);
- for(l=*prot; l; l=l->link)
- if(beq(flag, l->flag) && sametype(t, l->type))
- return;
- l = alloc(sizeof(*l));
- l->type = t;
- l->flag = flag;
- l->link = *prot;
- *prot = l;
-}
-
-static Tname*
-newname(char *s, int p, int count)
-{
- Tname *l;
-
- for(l=tname; l; l=l->link)
- if(strcmp(l->name, s) == 0) {
- if(p >= 0 && l->param != p)
- yyerror("vargck %s already defined\n", s);
- return l;
- }
- if(p < 0)
- return nil;
-
- l = alloc(sizeof(*l));
- l->name = s;
- l->param = p;
- l->link = tname;
- l->count = count;
- tname = l;
- return l;
-}
-
-void
-arginit(void)
-{
- int i;
-
-/* debug['F'] = 1;*/
-/* debug['w'] = 1;*/
-
- lastadj = Fadj;
- lastverb = Fverb;
- indchar = typ(TIND, types[TCHAR]);
-
- memset(flagbits, Fnone, sizeof(flagbits));
-
- for(i='0'; i<='9'; i++)
- argflag(i, Fignor);
- argflag('.', Fignor);
- argflag('#', Fignor);
- argflag('u', Fignor);
- argflag('h', Fignor);
- argflag('+', Fignor);
- argflag('-', Fignor);
-
- argflag('*', Fstar);
- argflag('l', Fl);
-
- argflag('o', Fverb);
- flagbits['x'] = flagbits['o'];
- flagbits['X'] = flagbits['o'];
-}
-
-static char*
-getquoted(void)
-{
- int c;
- Rune r;
- Fmt fmt;
-
- c = getnsc();
- if(c != '"')
- return nil;
- fmtstrinit(&fmt);
- for(;;) {
- r = getr();
- if(r == '\n') {
- free(fmtstrflush(&fmt));
- return nil;
- }
- if(r == '"')
- break;
- fmtrune(&fmt, r);
- }
- free(lastfmt);
- lastfmt = fmtstrflush(&fmt);
- return strdup(lastfmt);
-}
-
-void
-pragvararg(void)
-{
- Sym *s;
- int n, c;
- char *t;
- Type *ty;
- Tname *l;
-
- if(!debug['F'])
- goto out;
- s = getsym();
- if(s && strcmp(s->name, "argpos") == 0)
- goto ckpos;
- if(s && strcmp(s->name, "type") == 0)
- goto cktype;
- if(s && strcmp(s->name, "flag") == 0)
- goto ckflag;
- if(s && strcmp(s->name, "countpos") == 0)
- goto ckcount;
- yyerror("syntax in #pragma varargck");
- goto out;
-
-ckpos:
-/*#pragma varargck argpos warn 2*/
- s = getsym();
- if(s == S)
- goto bad;
- n = getnsn();
- if(n < 0)
- goto bad;
- newname(s->name, n, 0);
- goto out;
-
-ckcount:
-/*#pragma varargck countpos name 2*/
- s = getsym();
- if(s == S)
- goto bad;
- n = getnsn();
- if(n < 0)
- goto bad;
- newname(s->name, 0, n);
- goto out;
-
-ckflag:
-/*#pragma varargck flag 'c'*/
- c = getnsc();
- if(c != '\'')
- goto bad;
- c = getr();
- if(c == '\\')
- c = getr();
- else if(c == '\'')
- goto bad;
- if(c == '\n')
- goto bad;
- if(getc() != '\'')
- goto bad;
- argflag(c, Fignor);
- goto out;
-
-cktype:
- c = getnsc();
- unget(c);
- if(c != '"') {
-/*#pragma varargck type name int*/
- s = getsym();
- if(s == S)
- goto bad;
- l = newname(s->name, -1, -1);
- s = getsym();
- if(s == S)
- goto bad;
- ty = s->type;
- while((c = getnsc()) == '*')
- ty = typ(TIND, ty);
- unget(c);
- newprot(s, ty, "a", &l->prot);
- goto out;
- }
-
-/*#pragma varargck type O int*/
- t = getquoted();
- if(t == nil)
- goto bad;
- s = getsym();
- if(s == S)
- goto bad;
- ty = s->type;
- while((c = getnsc()) == '*')
- ty = typ(TIND, ty);
- unget(c);
- newprot(s, ty, t, &tprot);
- goto out;
-
-bad:
- yyerror("syntax in #pragma varargck");
-
-out:
- while(getnsc() != '\n')
- ;
-}
-
-Node*
-nextarg(Node *n, Node **a)
-{
- if(n == Z) {
- *a = Z;
- return Z;
- }
- if(n->op == OLIST) {
- *a = n->left;
- return n->right;
- }
- *a = n;
- return Z;
-}
-
-void
-checkargs(Node *nn, char *s, int pos)
-{
- Node *a, *n;
- Bits flag;
- Tprot *l;
-
- if(!debug['F'])
- return;
- n = nn;
- for(;;) {
- s = strchr(s, '%');
- if(s == 0) {
- nextarg(n, &a);
- if(a != Z)
- warn(nn, "more arguments than format %T",
- a->type);
- return;
- }
- s++;
- flag = getflag(s);
- while(nstar > 0) {
- n = nextarg(n, &a);
- pos++;
- nstar--;
- if(a == Z) {
- warn(nn, "more format than arguments %s",
- lastfmt);
- return;
- }
- if(a->type == T)
- continue;
- if(!sametype(types[TINT], a->type) &&
- !sametype(types[TUINT], a->type))
- warn(nn, "format mismatch '*' in %s %T, arg %d",
- lastfmt, a->type, pos);
- }
- for(l=tprot; l; l=l->link)
- if(sametype(types[TVOID], l->type)) {
- if(beq(flag, l->flag)) {
- s++;
- goto loop;
- }
- }
-
- n = nextarg(n, &a);
- pos++;
- if(a == Z) {
- warn(nn, "more format than arguments %s",
- lastfmt);
- return;
- }
- if(a->type == 0)
- continue;
- for(l=tprot; l; l=l->link)
- if(sametype(a->type, l->type)) {
-/*print("checking %T/%ux %T/%ux\n", a->type, flag.b[0], l->type, l->flag.b[0]);*/
- if(beq(flag, l->flag))
- goto loop;
- }
- warn(nn, "format mismatch %s %T, arg %d", lastfmt, a->type, pos);
- loop:;
- }
-}
-
-void
-dpcheck(Node *n)
-{
- char *s;
- Node *a, *b;
- Tname *l;
- Tprot *tl;
- int i, j;
-
- if(n == Z)
- return;
- b = n->left;
- if(b == Z || b->op != ONAME)
- return;
- s = b->sym->name;
- for(l=tname; l; l=l->link)
- if(strcmp(s, l->name) == 0)
- break;
- if(l == 0)
- return;
-
- if(l->count > 0) {
- // fetch count, then check remaining length
- i = l->count;
- a = nil;
- b = n->right;
- while(i > 0) {
- b = nextarg(b, &a);
- i--;
- }
- if(a == Z) {
- diag(n, "can't find count arg");
- return;
- }
- if(a->op != OCONST || !typechl[a->type->etype]) {
- diag(n, "count is invalid constant");
- return;
- }
- j = a->vconst;
- i = 0;
- while(b != Z) {
- b = nextarg(b, &a);
- i++;
- }
- if(i != j)
- diag(n, "found %d argument%s after count %d", i, i == 1 ? "" : "s", j);
- }
-
- if(l->prot != nil) {
- // check that all arguments after param or count
- // are listed in type list.
- i = l->count;
- if(i == 0)
- i = l->param;
- if(i == 0)
- return;
- a = nil;
- b = n->right;
- while(i > 0) {
- b = nextarg(b, &a);
- i--;
- }
- if(a == Z) {
- diag(n, "can't find count/param arg");
- return;
- }
- while(b != Z) {
- b = nextarg(b, &a);
- for(tl=l->prot; tl; tl=tl->link)
- if(sametype(a->type, tl->type))
- break;
- if(tl == nil)
- diag(a, "invalid type %T in call to %s", a->type, s);
- }
- }
-
- if(l->param <= 0)
- return;
- i = l->param;
- a = nil;
- b = n->right;
- while(i > 0) {
- b = nextarg(b, &a);
- i--;
- }
- if(a == Z) {
- diag(n, "can't find format arg");
- return;
- }
- if(!sametype(indchar, a->type)) {
- diag(n, "format arg type %T", a->type);
- return;
- }
- if(a->op != OADDR || a->left->op != ONAME || a->left->sym != symstring) {
-/* warn(n, "format arg not constant string");*/
- return;
- }
- s = a->left->cstring;
- checkargs(b, s, l->param);
-}
-
-void
-pragpack(void)
-{
- Sym *s;
-
- packflg = 0;
- s = getsym();
- if(s) {
- packflg = atoi(s->name+1);
- if(strcmp(s->name, "on") == 0 ||
- strcmp(s->name, "yes") == 0)
- packflg = 1;
- }
- while(getnsc() != '\n')
- ;
- if(debug['f'])
- if(packflg)
- print("%4d: pack %d\n", lineno, packflg);
- else
- print("%4d: pack off\n", lineno);
-}
-
-void
-pragfpround(void)
-{
- Sym *s;
-
- fproundflg = 0;
- s = getsym();
- if(s) {
- fproundflg = atoi(s->name+1);
- if(strcmp(s->name, "on") == 0 ||
- strcmp(s->name, "yes") == 0)
- fproundflg = 1;
- }
- while(getnsc() != '\n')
- ;
- if(debug['f'])
- if(fproundflg)
- print("%4d: fproundflg %d\n", lineno, fproundflg);
- else
- print("%4d: fproundflg off\n", lineno);
-}
-
-void
-pragtextflag(void)
-{
- Sym *s;
-
- s = getsym();
- if(s == S) {
- textflag = getnsn();
- } else {
- if(s->macro) {
- macexpand(s, symb);
- }
- if(symb[0] < '0' || symb[0] > '9')
- yyerror("pragma textflag not an integer");
- textflag = atoi(symb);
- }
- while(getnsc() != '\n')
- ;
- if(debug['f'])
- print("%4d: textflag %d\n", lineno, textflag);
-}
-
-void
-pragdataflag(void)
-{
- Sym *s;
-
- s = getsym();
- if(s == S) {
- dataflag = getnsn();
- } else {
- if(s->macro) {
- macexpand(s, symb);
- }
- if(symb[0] < '0' || symb[0] > '9')
- yyerror("pragma dataflag not an integer");
- dataflag = atoi(symb);
- }
- while(getnsc() != '\n')
- ;
- if(debug['f'])
- print("%4d: dataflag %d\n", lineno, dataflag);
-}
-
-void
-pragincomplete(void)
-{
- Sym *s;
- Type *t;
- int istag, w, et;
-
- istag = 0;
- s = getsym();
- if(s == nil)
- goto out;
- et = 0;
- w = s->lexical;
- if(w == LSTRUCT)
- et = TSTRUCT;
- else if(w == LUNION)
- et = TUNION;
- if(et != 0){
- s = getsym();
- if(s == nil){
- yyerror("missing struct/union tag in pragma incomplete");
- goto out;
- }
- if(s->lexical != LNAME && s->lexical != LTYPE){
- yyerror("invalid struct/union tag: %s", s->name);
- goto out;
- }
- dotag(s, et, 0);
- istag = 1;
- }else if(strcmp(s->name, "_off_") == 0){
- debug['T'] = 0;
- goto out;
- }else if(strcmp(s->name, "_on_") == 0){
- debug['T'] = 1;
- goto out;
- }
- t = s->type;
- if(istag)
- t = s->suetag;
- if(t == T)
- yyerror("unknown type %s in pragma incomplete", s->name);
- else if(!typesu[t->etype])
- yyerror("not struct/union type in pragma incomplete: %s", s->name);
- else
- t->garb |= GINCOMPLETE;
-out:
- while(getnsc() != '\n')
- ;
- if(debug['f'])
- print("%s incomplete\n", s->name);
-}
-
-Sym*
-getimpsym(void)
-{
- int c;
- char *cp;
-
- c = getnsc();
- if(isspace(c) || c == '"') {
- unget(c);
- return S;
- }
- for(cp = symb;;) {
- if(cp <= symb+NSYMB-4)
- *cp++ = c;
- c = getc();
- if(c > 0 && !isspace(c) && c != '"')
- continue;
- unget(c);
- break;
- }
- *cp = 0;
- if(cp > symb+NSYMB-4)
- yyerror("symbol too large: %s", symb);
- return lookup();
-}
-
-static int
-more(void)
-{
- int c;
-
- do
- c = getnsc();
- while(c == ' ' || c == '\t');
- unget(c);
- return c != '\n';
-}
-
-void
-pragcgo(char *verb)
-{
- Sym *local, *remote;
- char *p;
-
- if(strcmp(verb, "cgo_dynamic_linker") == 0 || strcmp(verb, "dynlinker") == 0) {
- p = getquoted();
- if(p == nil)
- goto err1;
- fmtprint(&pragcgobuf, "cgo_dynamic_linker %q\n", p);
- goto out;
-
- err1:
- yyerror("usage: #pragma cgo_dynamic_linker \"path\"");
- goto out;
- }
-
- if(strcmp(verb, "dynexport") == 0)
- verb = "cgo_export_dynamic";
- if(strcmp(verb, "cgo_export_static") == 0 || strcmp(verb, "cgo_export_dynamic") == 0) {
- local = getimpsym();
- if(local == nil)
- goto err2;
- if(!more()) {
- fmtprint(&pragcgobuf, "%s %q\n", verb, local->name);
- goto out;
- }
- remote = getimpsym();
- if(remote == nil)
- goto err2;
- fmtprint(&pragcgobuf, "%s %q %q\n", verb, local->name, remote->name);
- goto out;
-
- err2:
- yyerror("usage: #pragma %s local [remote]", verb);
- goto out;
- }
-
- if(strcmp(verb, "cgo_import_dynamic") == 0 || strcmp(verb, "dynimport") == 0) {
- local = getimpsym();
- if(local == nil)
- goto err3;
- if(!more()) {
- fmtprint(&pragcgobuf, "cgo_import_dynamic %q\n", local->name);
- goto out;
- }
- remote = getimpsym();
- if(remote == nil)
- goto err3;
- if(!more()) {
- fmtprint(&pragcgobuf, "cgo_import_dynamic %q %q\n", local->name, remote->name);
- goto out;
- }
- p = getquoted();
- if(p == nil)
- goto err3;
- fmtprint(&pragcgobuf, "cgo_import_dynamic %q %q %q\n", local->name, remote->name, p);
- goto out;
-
- err3:
- yyerror("usage: #pragma cgo_import_dynamic local [remote [\"library\"]]");
- goto out;
- }
-
- if(strcmp(verb, "cgo_import_static") == 0) {
- local = getimpsym();
- if(local == nil)
- goto err4;
- fmtprint(&pragcgobuf, "cgo_import_static %q\n", local->name);
- goto out;
-
- err4:
- yyerror("usage: #pragma cgo_import_static local [remote]");
- goto out;
- }
-
- if(strcmp(verb, "cgo_ldflag") == 0) {
- p = getquoted();
- if(p == nil)
- goto err5;
- fmtprint(&pragcgobuf, "cgo_ldflag %q\n", p);
- goto out;
-
- err5:
- yyerror("usage: #pragma cgo_ldflag \"arg\"");
- goto out;
- }
-
-out:
- while(getnsc() != '\n')
- ;
-}
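
The pragmas parsed by pragvararg() above look like the hypothetical header fragment below (checking is active only when the compiler's F debug flag is set); the function logmsg and the verb bindings are invented for illustration, following the forms shown in the ckpos/cktype/ckflag comments.

#pragma varargck argpos logmsg 2	/* argument 2 of logmsg is the format string */
#pragma varargck type "d" int	/* %d consumes an int */
#pragma varargck type "s" char*	/* %s consumes a char* */
#pragma varargck flag ','	/* ',' is an ignorable flag character */

void logmsg(int code, char *fmt, ...);
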
diff --git a/src/cmd/cc/funct.c b/src/cmd/cc/funct.c
deleted file mode 100644
index 92c067db8..000000000
--- a/src/cmd/cc/funct.c
+++ /dev/null
@@ -1,431 +0,0 @@
-// Inferno utils/cc/funct.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/funct.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-typedef struct Ftab Ftab;
-struct Ftab
-{
- char op;
- char* name;
- char typ;
-};
-typedef struct Gtab Gtab;
-struct Gtab
-{
- char etype;
- char* name;
-};
-
-Ftab ftabinit[OEND];
-Gtab gtabinit[NALLTYPES];
-
-int
-isfunct(Node *n)
-{
- Type *t, *t1;
- Funct *f;
- Node *l;
- Sym *s;
- int o;
-
- o = n->op;
- if(n->left == Z)
- goto no;
- t = n->left->type;
- if(t == T)
- goto no;
- f = t->funct;
-
- switch(o) {
- case OAS: // put cast on rhs
- case OASI:
- case OASADD:
- case OASAND:
- case OASASHL:
- case OASASHR:
- case OASDIV:
- case OASLDIV:
- case OASLMOD:
- case OASLMUL:
- case OASLSHR:
- case OASMOD:
- case OASMUL:
- case OASOR:
- case OASSUB:
- case OASXOR:
- if(n->right == Z)
- goto no;
- t1 = n->right->type;
- if(t1 == T)
- goto no;
- if(t1->funct == f)
- break;
-
- l = new(OXXX, Z, Z);
- *l = *n->right;
-
- n->right->left = l;
- n->right->right = Z;
- n->right->type = t;
- n->right->op = OCAST;
-
- if(!isfunct(n->right))
- prtree(n, "isfunc !");
- break;
-
- case OCAST: // t f(T) or T f(t)
- t1 = n->type;
- if(t1 == T)
- goto no;
- if(f != nil) {
- s = f->castfr[t1->etype];
- if(s == S)
- goto no;
- n->right = n->left;
- goto build;
- }
- f = t1->funct;
- if(f != nil) {
- s = f->castto[t->etype];
- if(s == S)
- goto no;
- n->right = n->left;
- goto build;
- }
- goto no;
- }
-
- if(f == nil)
- goto no;
- s = f->sym[o];
- if(s == S)
- goto no;
-
- /*
- * the answer is yes,
- * now we rewrite the node
- * and give diagnostics
- */
- switch(o) {
- default:
- diag(n, "isfunct op missing %O\n", o);
- goto bad;
-
- case OADD: // T f(T, T)
- case OAND:
- case OASHL:
- case OASHR:
- case ODIV:
- case OLDIV:
- case OLMOD:
- case OLMUL:
- case OLSHR:
- case OMOD:
- case OMUL:
- case OOR:
- case OSUB:
- case OXOR:
-
- case OEQ: // int f(T, T)
- case OGE:
- case OGT:
- case OHI:
- case OHS:
- case OLE:
- case OLO:
- case OLS:
- case OLT:
- case ONE:
- if(n->right == Z)
- goto bad;
- t1 = n->right->type;
- if(t1 == T)
- goto bad;
- if(t1->funct != f)
- goto bad;
- n->right = new(OLIST, n->left, n->right);
- break;
-
- case OAS: // structure copies done by the compiler
- case OASI:
- goto no;
-
- case OASADD: // T f(T*, T)
- case OASAND:
- case OASASHL:
- case OASASHR:
- case OASDIV:
- case OASLDIV:
- case OASLMOD:
- case OASLMUL:
- case OASLSHR:
- case OASMOD:
- case OASMUL:
- case OASOR:
- case OASSUB:
- case OASXOR:
- if(n->right == Z)
- goto bad;
- t1 = n->right->type;
- if(t1 == T)
- goto bad;
- if(t1->funct != f)
- goto bad;
- n->right = new(OLIST, new(OADDR, n->left, Z), n->right);
- break;
-
- case OPOS: // T f(T)
- case ONEG:
- case ONOT:
- case OCOM:
- n->right = n->left;
- break;
-
-
- }
-
-build:
- l = new(ONAME, Z, Z);
- l->sym = s;
- l->type = s->type;
- l->etype = s->type->etype;
- l->xoffset = s->offset;
- l->class = s->class;
- tcomo(l, 0);
-
- n->op = OFUNC;
- n->left = l;
- n->type = l->type->link;
- if(tcompat(n, T, l->type, tfunct))
- goto bad;
- if(tcoma(n->left, n->right, l->type->down, 1))
- goto bad;
- return 1;
-
-no:
- return 0;
-
-bad:
- diag(n, "can't rewrite typestr for op %O\n", o);
- prtree(n, "isfunct");
- n->type = T;
- return 1;
-}
-
-void
-dclfunct(Type *t, Sym *s)
-{
- Funct *f;
- Node *n;
- Type *f1, *f2, *f3, *f4;
- int o, i, c;
- char str[100];
-
- if(t->funct)
- return;
-
-	// recognize generated tag of form _%d_
- if(t->tag == S)
- goto bad;
- for(i=0; c = t->tag->name[i]; i++) {
- if(c == '_') {
- if(i == 0 || t->tag->name[i+1] == 0)
- continue;
- break;
- }
- if(c < '0' || c > '9')
- break;
- }
- if(c == 0)
- goto bad;
-
- f = alloc(sizeof(*f));
- for(o=0; o<nelem(f->sym); o++)
- f->sym[o] = S;
-
- t->funct = f;
-
- f1 = typ(TFUNC, t);
- f1->down = copytyp(t);
- f1->down->down = t;
-
- f2 = typ(TFUNC, types[TINT]);
- f2->down = copytyp(t);
- f2->down->down = t;
-
- f3 = typ(TFUNC, t);
- f3->down = typ(TIND, t);
- f3->down->down = t;
-
- f4 = typ(TFUNC, t);
- f4->down = t;
-
- for(i=0;; i++) {
- o = ftabinit[i].op;
- if(o == OXXX)
- break;
- sprint(str, "%s_%s_", t->tag->name, ftabinit[i].name);
- n = new(ONAME, Z, Z);
- n->sym = slookup(str);
- f->sym[o] = n->sym;
- switch(ftabinit[i].typ) {
- default:
- diag(Z, "dclfunct op missing %d\n", ftabinit[i].typ);
- break;
-
- case 1: // T f(T,T) +
- dodecl(xdecl, CEXTERN, f1, n);
- break;
-
- case 2: // int f(T,T) ==
- dodecl(xdecl, CEXTERN, f2, n);
- break;
-
- case 3: // void f(T*,T) +=
- dodecl(xdecl, CEXTERN, f3, n);
- break;
-
- case 4: // T f(T) ~
- dodecl(xdecl, CEXTERN, f4, n);
- break;
- }
- }
- for(i=0;; i++) {
- o = gtabinit[i].etype;
- if(o == TXXX)
- break;
-
- /*
- * OCAST types T1 _T2_T1_(T2)
- */
- sprint(str, "_%s%s_", gtabinit[i].name, t->tag->name);
- n = new(ONAME, Z, Z);
- n->sym = slookup(str);
- f->castto[o] = n->sym;
-
- f1 = typ(TFUNC, t);
- f1->down = types[o];
- dodecl(xdecl, CEXTERN, f1, n);
-
- sprint(str, "%s_%s_", t->tag->name, gtabinit[i].name);
- n = new(ONAME, Z, Z);
- n->sym = slookup(str);
- f->castfr[o] = n->sym;
-
- f1 = typ(TFUNC, types[o]);
- f1->down = t;
- dodecl(xdecl, CEXTERN, f1, n);
- }
- return;
-bad:
- diag(Z, "dclfunct bad %T %s\n", t, s->name);
-}
-
-Gtab gtabinit[NALLTYPES] =
-{
- TCHAR, "c",
- TUCHAR, "uc",
- TSHORT, "h",
- TUSHORT, "uh",
- TINT, "i",
- TUINT, "ui",
- TLONG, "l",
- TULONG, "ul",
- TVLONG, "v",
- TUVLONG, "uv",
- TFLOAT, "f",
- TDOUBLE, "d",
- TXXX
-};
-
-Ftab ftabinit[OEND] =
-{
- OADD, "add", 1,
- OAND, "and", 1,
- OASHL, "ashl", 1,
- OASHR, "ashr", 1,
- ODIV, "div", 1,
- OLDIV, "ldiv", 1,
- OLMOD, "lmod", 1,
- OLMUL, "lmul", 1,
- OLSHR, "lshr", 1,
- OMOD, "mod", 1,
- OMUL, "mul", 1,
- OOR, "or", 1,
- OSUB, "sub", 1,
- OXOR, "xor", 1,
-
- OEQ, "eq", 2,
- OGE, "ge", 2,
- OGT, "gt", 2,
- OHI, "hi", 2,
- OHS, "hs", 2,
- OLE, "le", 2,
- OLO, "lo", 2,
- OLS, "ls", 2,
- OLT, "lt", 2,
- ONE, "ne", 2,
-
- OASADD, "asadd", 3,
- OASAND, "asand", 3,
- OASASHL, "asashl", 3,
- OASASHR, "asashr", 3,
- OASDIV, "asdiv", 3,
- OASLDIV, "asldiv", 3,
- OASLMOD, "aslmod", 3,
- OASLMUL, "aslmul", 3,
- OASLSHR, "aslshr", 3,
- OASMOD, "asmod", 3,
- OASMUL, "asmul", 3,
- OASOR, "asor", 3,
- OASSUB, "assub", 3,
- OASXOR, "asxor", 3,
-
- OPOS, "pos", 4,
- ONEG, "neg", 4,
- OCOM, "com", 4,
- ONOT, "not", 4,
-
-// OPOSTDEC,
-// OPOSTINC,
-// OPREDEC,
-// OPREINC,
-
- OXXX,
-};
-
-// Node* nodtestv;
-
-// Node* nodvpp;
-// Node* nodppv;
-// Node* nodvmm;
-// Node* nodmmv;
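
The funct.c code deleted above implements operator overloading for typestr-tagged types: dclfunct declares external functions whose symbols are mangled from the struct tag and the operator name (plus cast helpers to and from the basic types), and isfunct rewrites a matching operator node into an OFUNC call to that symbol. What follows is only a minimal standalone sketch of the naming convention, assuming standard snprintf/printf in place of the compiler's sprint, with purely illustrative tag, operator, and type-suffix strings:

#include <stdio.h>

/*
 * Mirror of dclfunct's symbol naming: "<tag>_<op>_" for operators,
 * "_<type><tag>_" for casts to the tagged type, and "<tag>_<type>_"
 * for casts from it. "cpx", "add" and "d" below are illustrative only.
 */
static void
functnames(const char *tag, const char *op, const char *tname)
{
	char str[100];

	snprintf(str, sizeof str, "%s_%s_", tag, op);		/* e.g. cpx_add_ */
	printf("operator:  %s\n", str);
	snprintf(str, sizeof str, "_%s%s_", tname, tag);	/* cast to tagged type */
	printf("cast to:   %s\n", str);
	snprintf(str, sizeof str, "%s_%s_", tag, tname);	/* cast from tagged type */
	printf("cast from: %s\n", str);
}

int
main(void)
{
	functnames("cpx", "add", "d");
	return 0;
}
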
diff --git a/src/cmd/cc/godefs.c b/src/cmd/cc/godefs.c
deleted file mode 100644
index d9f67f0ae..000000000
--- a/src/cmd/cc/godefs.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// cmd/cc/godefs.cc
-//
-// derived from pickle.cc which itself was derived from acid.cc.
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009-2011 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-static int upper;
-
-static char *kwd[] =
-{
- "_bool",
- "_break",
- "_byte",
- "_case",
- "_chan",
- "_complex128",
- "_complex64",
- "_const",
- "_continue",
- "_default",
- "_defer",
- "_else",
- "_fallthrough",
- "_false",
- "_float32",
- "_float64",
- "_for",
- "_func",
- "_go",
- "_goto",
- "_if",
- "_import",
- "_int",
- "_int16",
- "_int32",
- "_int64",
- "_int8",
- "_interface",
- "_intptr",
- "_map",
- "_package",
- "_panic",
- "_range",
- "_return",
- "_select",
- "_string",
- "_struct",
- "_switch",
- "_true",
- "_type",
- "_uint",
- "_uint16",
- "_uint32",
- "_uint64",
- "_uint8",
- "_uintptr",
- "_var",
-};
-
-static char*
-pmap(char *s)
-{
- int i, bot, top, mid;
-
- bot = -1;
- top = nelem(kwd);
- while(top - bot > 1){
- mid = (bot + top) / 2;
- i = strcmp(kwd[mid]+1, s);
- if(i == 0)
- return kwd[mid];
- if(i < 0)
- bot = mid;
- else
- top = mid;
- }
-
- return s;
-}
-
-
-int
-Uconv(Fmt *fp)
-{
- char str[STRINGSZ+1];
- char *s, *n;
- int i;
-
- str[0] = 0;
- s = va_arg(fp->args, char*);
-
- // strip package name
- n = strrchr(s, '.');
- if(n != nil)
- s = n + 1;
-
- if(s && *s) {
- if(upper)
- str[0] = toupper((uchar)*s);
- else
- str[0] = tolower((uchar)*s);
- for(i = 1; i < STRINGSZ && s[i] != 0; i++)
- str[i] = tolower((uchar)s[i]);
- str[i] = 0;
- }
-
- return fmtstrcpy(fp, pmap(str));
-}
-
-
-static Sym*
-findsue(Type *t)
-{
- int h;
- Sym *s;
-
- if(t != T)
- for(h=0; h<nelem(hash); h++)
- for(s = hash[h]; s != S; s = s->link)
- if(s->suetag && s->suetag->link == t)
- return s;
- return 0;
-}
-
-static void
-printtypename(Type *t)
-{
- Sym *s;
- int w;
- char *n;
-
- for( ; t != nil; t = t->link) {
- switch(t->etype) {
- case TIND:
- // Special handling of *void.
- if(t->link != nil && t->link->etype==TVOID) {
- Bprint(&outbuf, "unsafe.Pointer");
- return;
- }
- // *func == func
- if(t->link != nil && t->link->etype==TFUNC)
- continue;
- Bprint(&outbuf, "*");
- continue;
- case TARRAY:
- w = t->width;
- if(t->link && t->link->width)
- w /= t->link->width;
- Bprint(&outbuf, "[%d]", w);
- continue;
- }
- break;
- }
-
- if(t == nil) {
- Bprint(&outbuf, "bad // should not happen");
- return;
- }
-
- switch(t->etype) {
- case TINT:
- case TUINT:
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TLONG:
- case TULONG:
- case TVLONG:
- case TUVLONG:
- case TFLOAT:
- case TDOUBLE:
- // All names used in the runtime code should be typedefs.
- if(t->tag != nil) {
- if(strcmp(t->tag->name, "intgo") == 0)
- Bprint(&outbuf, "int");
- else if(strcmp(t->tag->name, "uintgo") == 0)
- Bprint(&outbuf, "uint");
- else
- Bprint(&outbuf, "%s", t->tag->name);
- } else
- Bprint(&outbuf, "C.%T", t);
- break;
- case TUNION:
- case TSTRUCT:
- s = findsue(t->link);
- n = "bad";
- if(s != S)
- n = s->name;
- else if(t->tag)
- n = t->tag->name;
- if(strcmp(n, "String") == 0)
- Bprint(&outbuf, "string");
- else if(strcmp(n, "Slice") == 0)
- Bprint(&outbuf, "[]byte");
- else if(strcmp(n, "Eface") == 0)
- Bprint(&outbuf, "interface{}");
- else
- Bprint(&outbuf, "%U", n);
- break;
- case TFUNC:
- // There's no equivalent to a C function in the Go world.
- Bprint(&outbuf, "unsafe.Pointer");
- break;
- case TDOT:
- Bprint(&outbuf, "...interface{}");
- break;
- default:
- Bprint(&outbuf, " weird<%T>", t);
- }
-}
-
-static int
-dontrun(void)
-{
- Io *i;
- int n;
-
- if(!debug['q'] && !debug['Q'])
- return 1;
- if(debug['q'] + debug['Q'] > 1) {
- n = 0;
- for(i=iostack; i; i=i->link)
- n++;
- if(n > 1)
- return 1;
- }
-
- upper = debug['Q'];
- return 0;
-}
-
-void
-godeftype(Type *t)
-{
- Sym *s;
- Type *l;
- int gotone;
-
- if(dontrun())
- return;
-
- switch(t->etype) {
- case TUNION:
- case TSTRUCT:
- s = findsue(t->link);
- if(s == S) {
- Bprint(&outbuf, "/* can't find %T */\n\n", t);
- return;
- }
-
- gotone = 0; // for unions, take first member of size equal to union
- Bprint(&outbuf, "type %U struct {\n", s->name);
- for(l = t->link; l != T; l = l->down) {
- Bprint(&outbuf, "\t");
- if(t->etype == TUNION) {
- if(!gotone && l->width == t->width)
- gotone = 1;
- else
- Bprint(&outbuf, "// (union)\t");
- }
- if(l->sym != nil) // not anonymous field
- Bprint(&outbuf, "%U\t", l->sym->name);
- printtypename(l);
- Bprint(&outbuf, "\n");
- }
- Bprint(&outbuf, "}\n\n");
- break;
-
- default:
- Bprint(&outbuf, "/* %T */\n\n", t);
- break;
- }
-}
-
-void
-godefvar(Sym *s)
-{
- Type *t, *t1;
- char n;
-
- if(dontrun())
- return;
-
- t = s->type;
- if(t == nil)
- return;
-
- switch(t->etype) {
- case TENUM:
- if(!typefd[t->etype])
- Bprint(&outbuf, "const %s = %lld\n", s->name, s->vconst);
- else
- Bprint(&outbuf, "const %s = %f\n;", s->name, s->fconst);
- break;
-
- case TFUNC:
- Bprint(&outbuf, "func %U(", s->name);
- n = 'a';
- for(t1 = t->down; t1 != T; t1 = t1->down) {
- if(t1->etype == TVOID)
- break;
- if(t1 != t->down)
- Bprint(&outbuf, ", ");
- Bprint(&outbuf, "%c ", n++);
- printtypename(t1);
- }
- Bprint(&outbuf, ")");
- if(t->link && t->link->etype != TVOID) {
- Bprint(&outbuf, " ");
- printtypename(t->link);
- }
- Bprint(&outbuf, "\n");
- break;
-
- default:
- switch(s->class) {
- case CTYPEDEF:
- if(!typesu[t->etype]) {
- Bprint(&outbuf, "// type %U\t", s->name);
- printtypename(t);
- Bprint(&outbuf, "\n");
- }
- break;
- case CSTATIC:
- case CEXTERN:
- case CGLOBL:
- if(strchr(s->name, '$') != nil)
- break;
- if(strncmp(s->name, "go.weak.", 8) == 0)
- break;
- Bprint(&outbuf, "var %U\t", s->name);
- printtypename(t);
- Bprint(&outbuf, "\n");
- break;
- }
- break;
- }
-}
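
The godefs.c hunk deleted above emits Go declarations for C types; its pmap avoids collisions with Go keywords by binary-searching a sorted, underscore-prefixed keyword table and returning the prefixed form on a hit. Here is a self-contained sketch of that lookup, assuming the standard C library and a shortened keyword list kept only for illustration:

#include <stdio.h>
#include <string.h>

#define nelem(x) (int)(sizeof(x)/sizeof((x)[0]))

/* Sorted by the part after the leading '_', as in the deleted table. */
static const char *kwd[] = {
	"_bool", "_break", "_func", "_type", "_var",
};

/* Return the '_'-prefixed form if s is a listed keyword, else s itself. */
static const char*
pmap(const char *s)
{
	int i, bot, top, mid;

	bot = -1;
	top = nelem(kwd);
	while(top - bot > 1) {
		mid = (bot + top) / 2;
		i = strcmp(kwd[mid]+1, s);
		if(i == 0)
			return kwd[mid];
		if(i < 0)
			bot = mid;
		else
			top = mid;
	}
	return s;
}

int
main(void)
{
	printf("%s %s\n", pmap("type"), pmap("count"));	/* _type count */
	return 0;
}
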
diff --git a/src/cmd/cc/lex.c b/src/cmd/cc/lex.c
deleted file mode 100644
index 7c9f718c0..000000000
--- a/src/cmd/cc/lex.c
+++ /dev/null
@@ -1,1593 +0,0 @@
-// Inferno utils/cc/lex.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/lex.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-#include "y.tab.h"
-#include "../ld/textflag.h"
-
-#ifndef CPP
-#define CPP "cpp"
-#endif
-
-int
-systemtype(int sys)
-{
-#ifdef _WIN32
- return sys&Windows;
-#else
- return sys&Plan9;
-#endif
-}
-
-int
-pathchar(void)
-{
- return '/';
-}
-
-/*
- * known debug flags
- * -a acid declaration output
- * -A !B
- * -B non ANSI
- * -d print declarations
- * -D name define
- * -F format specification check
- * -G print pgen stuff
- * -g print cgen trees
- * -i print initialization
- * -I path include
- * -l generate little-endian code
- * -L print every NAME symbol
- * -M constant multiplication
- * -m print add/sub/mul trees
- * -n print acid or godefs to file (%.c=%.acid) (with -a or -aa)
- * -o file output file
- * -p use standard cpp ANSI preprocessor (not on windows)
- * -p something with peepholes
- * -q print equivalent Go code for variables and types (lower-case identifiers)
- * -Q print equivalent Go code for variables and types (upper-case identifiers)
- * -r print registerization
- * -s print structure offsets (with -a or -aa)
- * -S print assembly
- * -t print type trees
- * -V enable void* conversion warnings
- * -v verbose printing
- * -w print warnings
- * -X abort on error
- * -. Inhibit search for includes in source directory
- */
-
-void
-usage(void)
-{
- print("usage: %cc [options] file.c...\n", thechar);
- flagprint(1);
- errorexit();
-}
-
-void
-dospim(void)
-{
- thechar = '0';
- thestring = "spim";
-}
-
-char **defs;
-int ndef;
-
-void
-dodef(char *p)
-{
- if(ndef%8 == 0)
- defs = allocn(defs, ndef*sizeof(char *),
- 8*sizeof(char *));
- defs[ndef++] = p;
- dodefine(p);
-}
-
-void
-main(int argc, char *argv[])
-{
- int c;
- char *p;
-
- // Allow GOARCH=thestring or GOARCH=thestringsuffix,
- // but not other values.
- p = getgoarch();
- if(strncmp(p, thestring, strlen(thestring)) != 0)
- sysfatal("cannot use %cc with GOARCH=%s", thechar, p);
- if(strcmp(p, "amd64p32") == 0) // must be before cinit
- ewidth[TIND] = 4;
-
- nacl = strcmp(getgoos(), "nacl") == 0;
- if(nacl)
- flag_largemodel = 1;
-
- quotefmtinstall(); // before cinit, which overrides %Q
-
- linkarchinit();
- ctxt = linknew(thelinkarch);
- ctxt->diag = yyerror;
- ctxt->bso = &bstdout;
- Binit(&bstdout, 1, OWRITE);
-
- ensuresymb(NSYMB);
- memset(debug, 0, sizeof(debug));
- tinit();
- cinit();
- ginit();
- arginit();
-
- fmtstrinit(&pragcgobuf);
-
- tufield = simplet((1L<<tfield->etype) | BUNSIGNED);
- ndef = 0;
- defs = nil;
- outfile = 0;
- setinclude(".");
-
- flagcount("+", "pass -+ to preprocessor", &debug['+']);
- flagcount(".", "pass -. to preprocessor", &debug['.']);
- flagcount("<", "debug shift", &debug['<']);
- flagcount("A", "debug alignment", &debug['A']);
- flagcount("B", "allow pre-ANSI code", &debug['B']);
- if(thechar == '5')
- flagcount("C", "debug constant propagation", &debug['C']);
- flagfn1("D", "name[=value]: add #define", dodef);
- flagcount("F", "enable print format checks", &debug['F']);
- if(thechar == '5')
- flagcount("H", "debug shift propagation", &debug['H']);
- flagfn1("I", "dir: add dir to include path", setinclude);
- flagcount("L", "debug lexer", &debug['L']);
- flagcount("M", "debug move generation", &debug['M']);
- flagcount("N", "disable optimizations", &debug['N']);
- flagcount("P", "debug peephole optimizer", &debug['P']);
- flagcount("Q", "print exported Go definitions", &debug['Q']);
- flagcount("R", "debug register optimizer", &debug['R']);
- flagcount("S", "print assembly", &debug['S']);
- flagcount("T", "enable type signatures", &debug['T']);
- flagcount("V", "enable pointer type checks", &debug['V']);
- flagcount("W", "debug switch generation", &debug['W']);
- flagcount("X", "abort on error", &debug['X']);
- flagcount("Y", "debug index generation", &debug['Y']);
- flagcount("Z", "skip code generation", &debug['Z']);
- flagcount("a", "print acid definitions", &debug['a']);
- flagcount("c", "debug constant evaluation", &debug['c']);
- flagcount("d", "debug declarations", &debug['d']);
- flagcount("e", "debug macro expansion", &debug['e']);
- flagcount("f", "debug pragmas", &debug['f']);
- flagcount("g", "debug code generation", &debug['g']);
- flagcount("i", "debug initialization", &debug['i']);
- if(thechar == 'v')
- flagfn0("l", "little-endian mips mode", dospim);
- flagcount("m", "debug multiplication", &debug['m']);
- flagcount("n", "print acid/Go to file, not stdout", &debug['n']);
- flagstr("o", "file: set output file", &outfile);
- flagcount("p", "invoke C preprocessor", &debug['p']);
- flagcount("q", "print Go definitions", &debug['q']);
- flagcount("s", "print #define assembly offsets", &debug['s']);
- flagcount("t", "debug code generation", &debug['t']);
- flagstr("trimpath", "prefix: remove prefix from recorded source file paths", &ctxt->trimpath);
- flagcount("w", "enable warnings", &debug['w']);
- flagcount("v", "increase debug verbosity", &debug['v']);
- if(thechar == '6')
- flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel);
-
- flagparse(&argc, &argv, usage);
- ctxt->debugasm = debug['S'];
- ctxt->debugvlog = debug['v'];
-
- if(argc < 1 && outfile == 0)
- usage();
-
- if(argc > 1){
- print("can't compile multiple files\n");
- errorexit();
- }
-
- if(argc == 0)
- c = compile("stdin", defs, ndef);
- else
- c = compile(argv[0], defs, ndef);
-
- Bflush(&bstdout);
- if(c)
- errorexit();
- exits(0);
-}
-
-int
-compile(char *file, char **defs, int ndef)
-{
- char *ofile;
- char *p, **av, opt[256];
- int i, c, fd[2];
- static int first = 1;
-
- ofile = alloc(strlen(file)+10);
- strcpy(ofile, file);
- p = utfrrune(ofile, pathchar());
- if(p) {
- *p++ = 0;
- if(!debug['.'])
- include[0] = strdup(ofile);
- } else
- p = ofile;
-
- if(outfile == 0) {
- outfile = p;
- if(outfile) {
- if(p = utfrrune(outfile, '.'))
- if(p[1] == 'c' && p[2] == 0)
- p[0] = 0;
- p = utfrune(outfile, 0);
- if(debug['a'] && debug['n'])
- strcat(p, ".acid");
- else if((debug['q'] || debug['Q']) && debug['n'])
- strcat(p, ".go");
- else {
- p[0] = '.';
- p[1] = thechar;
- p[2] = 0;
- }
- } else
- outfile = "/dev/null";
- }
-
- if (first)
- Binit(&diagbuf, 1, OWRITE);
- /*
- * if we're writing acid to standard output, don't keep scratching
- * outbuf.
- */
- if((debug['a'] || debug['q'] || debug['Q']) && !debug['n']) {
- if (first) {
- outfile = 0;
- Binit(&outbuf, dup(1, -1), OWRITE);
- dup(2, 1);
- }
- } else {
- c = create(outfile, OWRITE, 0664);
- if(c < 0) {
- diag(Z, "cannot open %s - %r", outfile);
- outfile = 0;
- errorexit();
- }
- Binit(&outbuf, c, OWRITE);
- outfile = strdup(outfile);
- }
- newio();
- first = 0;
-
- /* Use an ANSI preprocessor */
- if(debug['p']) {
- if(systemtype(Windows)) {
- diag(Z, "-p option not supported on windows");
- errorexit();
- }
- if(access(file, AREAD) < 0) {
- diag(Z, "%s does not exist", file);
- errorexit();
- }
- if(pipe(fd) < 0) {
- diag(Z, "pipe failed");
- errorexit();
- }
- switch(fork()) {
- case -1:
- diag(Z, "fork failed");
- errorexit();
- case 0:
- close(fd[0]);
- dup(fd[1], 1);
- close(fd[1]);
- av = alloc((ndef+ninclude+5)*sizeof(char *));
- av[0] = CPP;
- i = 1;
- if(debug['.']){
- sprint(opt, "-.");
- av[i++] = strdup(opt);
- }
- if(debug['+']) {
- sprint(opt, "-+");
- av[i++] = strdup(opt);
- }
- for(c = 0; c < ndef; c++)
- av[i++] = smprint("-D%s", defs[c]);
- for(c = 0; c < ninclude; c++)
- av[i++] = smprint("-I%s", include[c]);
- if(strcmp(file, "stdin") != 0)
- av[i++] = file;
- av[i] = 0;
- if(debug['p'] > 1) {
- for(c = 0; c < i; c++)
- fprint(2, "%s ", av[c]);
- fprint(2, "\n");
- }
- exec(av[0], av);
- fprint(2, "can't exec C preprocessor %s: %r\n", CPP);
- errorexit();
- default:
- close(fd[1]);
- newfile(file, fd[0]);
- break;
- }
- } else {
- if(strcmp(file, "stdin") == 0)
- newfile(file, 0);
- else
- newfile(file, -1);
- }
- yyparse();
- if(!debug['a'] && !debug['q'] && !debug['Q'])
- gclean();
- return nerrors;
-}
-
-void
-errorexit(void)
-{
- Bflush(&bstdout);
- if(outfile)
- remove(outfile);
- exits("error");
-}
-
-void
-pushio(void)
-{
- Io *i;
-
- i = iostack;
- if(i == I) {
- yyerror("botch in pushio");
- errorexit();
- }
- i->p = fi.p;
- i->c = fi.c;
-}
-
-void
-newio(void)
-{
- Io *i;
- static int pushdepth = 0;
-
- i = iofree;
- if(i == I) {
- pushdepth++;
- if(pushdepth > 1000) {
- yyerror("macro/io expansion too deep");
- errorexit();
- }
- i = alloc(sizeof(*i));
- } else
- iofree = i->link;
- i->c = 0;
- i->f = -1;
- ionext = i;
-}
-
-void
-newfile(char *s, int f)
-{
- Io *i;
-
- if(debug['e'])
- print("%L: %s\n", lineno, s);
-
- i = ionext;
- i->link = iostack;
- iostack = i;
- i->f = f;
- if(f < 0)
- i->f = open(s, 0);
- if(i->f < 0) {
- yyerror("%cc: %r: %s", thechar, s);
- errorexit();
- }
- fi.c = 0;
- linklinehist(ctxt, lineno, s, 0);
-}
-
-Sym*
-slookup(char *s)
-{
- ensuresymb(strlen(s));
- strcpy(symb, s);
- return lookup();
-}
-
-Sym*
-lookup(void)
-{
- Sym *s;
- uint32 h;
- char *p;
- int c, n;
- char *r, *w;
-
- if((uchar)symb[0] == 0xc2 && (uchar)symb[1] == 0xb7) {
- // turn leading · into ""·
- h = strlen(symb);
- ensuresymb(h+2);
- memmove(symb+2, symb, h+1);
- symb[0] = '"';
- symb[1] = '"';
- }
-
- for(r=w=symb; *r; r++) {
- // turn · (U+00B7) into .
- // turn ∕ (U+2215) into /
- if((uchar)*r == 0xc2 && (uchar)*(r+1) == 0xb7) {
- *w++ = '.';
- r++;
- }else if((uchar)*r == 0xe2 && (uchar)*(r+1) == 0x88 && (uchar)*(r+2) == 0x95) {
- *w++ = '/';
- r++;
- r++;
- }else
- *w++ = *r;
- }
- *w = '\0';
-
- h = 0;
- for(p=symb; *p;) {
- h = h * 3;
- h += *p++;
- }
- n = (p - symb) + 1;
- h &= 0xffffff;
- h %= NHASH;
- c = symb[0];
- for(s = hash[h]; s != S; s = s->link) {
- if(s->name[0] != c)
- continue;
- if(strcmp(s->name, symb) == 0)
- return s;
- }
- s = alloc(sizeof(*s));
- s->name = alloc(n);
- memmove(s->name, symb, n);
- s->link = hash[h];
- hash[h] = s;
- syminit(s);
-
- return s;
-}
-
-void
-syminit(Sym *s)
-{
- s->lexical = LNAME;
- s->block = 0;
- s->offset = 0;
- s->type = T;
- s->suetag = T;
- s->class = CXXX;
- s->aused = 0;
- s->sig = SIGNONE;
-}
-
-#define EOF (-1)
-#define IGN (-2)
-#define ESC (1<<20)
-#define GETC() ((--fi.c < 0)? filbuf(): (*fi.p++ & 0xff))
-
-enum
-{
- Numdec = 1<<0,
- Numlong = 1<<1,
- Numuns = 1<<2,
- Numvlong = 1<<3,
- Numflt = 1<<4,
-};
-
-int32
-yylex(void)
-{
- vlong vv;
- int32 c, c1, t;
- char *cp;
- Rune rune;
- Sym *s;
-
- if(peekc != IGN) {
- c = peekc;
- peekc = IGN;
- goto l1;
- }
-l0:
- c = GETC();
-
-l1:
- if(c >= Runeself) {
- /*
- * extension --
- * all multibyte runes are alpha
- */
- cp = symb;
- goto talph;
- }
- if(isspace(c)) {
- if(c == '\n')
- lineno++;
- goto l0;
- }
- if(isalpha(c)) {
- cp = symb;
- if(c != 'L')
- goto talph;
- *cp++ = c;
- c = GETC();
- if(c == '\'') {
- /* L'x' */
- c = escchar('\'', 1, 0);
- if(c == EOF)
- c = '\'';
- c1 = escchar('\'', 1, 0);
- if(c1 != EOF) {
- yyerror("missing '");
- peekc = c1;
- }
- yylval.vval = convvtox(c, TRUNE);
- return LUCONST;
- }
- if(c == '"') {
- goto caselq;
- }
- goto talph;
- }
- if(isdigit(c))
- goto tnum;
- switch(c)
- {
-
- case EOF:
- peekc = EOF;
- return -1;
-
- case '_':
- cp = symb;
- goto talph;
-
- case '#':
- domacro();
- goto l0;
-
- case '.':
- c1 = GETC();
- if(isdigit(c1)) {
- cp = symb;
- *cp++ = c;
- c = c1;
- c1 = 0;
- goto casedot;
- }
- break;
-
- case '"':
- strcpy(symb, "\"<string>\"");
- cp = alloc(0);
- c1 = 0;
-
- /* "..." */
- for(;;) {
- c = escchar('"', 0, 1);
- if(c == EOF)
- break;
- if(c & ESC) {
- cp = allocn(cp, c1, 1);
- cp[c1++] = c;
- } else {
- rune = c;
- c = runelen(rune);
- cp = allocn(cp, c1, c);
- runetochar(cp+c1, &rune);
- c1 += c;
- }
- }
- yylval.sval.l = c1;
- do {
- cp = allocn(cp, c1, 1);
- cp[c1++] = 0;
- } while(c1 & MAXALIGN);
- yylval.sval.s = cp;
- return LSTRING;
-
- caselq:
- /* L"..." */
- strcpy(symb, "\"L<string>\"");
- cp = alloc(0);
- c1 = 0;
- for(;;) {
- c = escchar('"', 1, 0);
- if(c == EOF)
- break;
- cp = allocn(cp, c1, sizeof(TRune));
- *(TRune*)(cp + c1) = c;
- c1 += sizeof(TRune);
- }
- yylval.sval.l = c1;
- do {
- cp = allocn(cp, c1, sizeof(TRune));
- *(TRune*)(cp + c1) = 0;
- c1 += sizeof(TRune);
- } while(c1 & MAXALIGN);
- yylval.sval.s = cp;
- return LLSTRING;
-
- case '\'':
- /* '.' */
- c = escchar('\'', 0, 0);
- if(c == EOF)
- c = '\'';
- c1 = escchar('\'', 0, 0);
- if(c1 != EOF) {
- yyerror("missing '");
- peekc = c1;
- }
- vv = c;
- yylval.vval = convvtox(vv, TUCHAR);
- if(yylval.vval != vv)
- yyerror("overflow in character constant: 0x%x", c);
- else
- if(c & 0x80){
- nearln = lineno;
- warn(Z, "sign-extended character constant");
- }
- yylval.vval = convvtox(vv, TCHAR);
- return LCONST;
-
- case '/':
- c1 = GETC();
- if(c1 == '*') {
- for(;;) {
- c = getr();
- while(c == '*') {
- c = getr();
- if(c == '/')
- goto l0;
- }
- if(c == EOF) {
- yyerror("eof in comment");
- errorexit();
- }
- }
- }
- if(c1 == '/') {
- for(;;) {
- c = getr();
- if(c == '\n')
- goto l0;
- if(c == EOF) {
- yyerror("eof in comment");
- errorexit();
- }
- }
- }
- if(c1 == '=')
- return LDVE;
- break;
-
- case '*':
- c1 = GETC();
- if(c1 == '=')
- return LMLE;
- break;
-
- case '%':
- c1 = GETC();
- if(c1 == '=')
- return LMDE;
- break;
-
- case '+':
- c1 = GETC();
- if(c1 == '+')
- return LPP;
- if(c1 == '=')
- return LPE;
- break;
-
- case '-':
- c1 = GETC();
- if(c1 == '-')
- return LMM;
- if(c1 == '=')
- return LME;
- if(c1 == '>')
- return LMG;
- break;
-
- case '>':
- c1 = GETC();
- if(c1 == '>') {
- c = LRSH;
- c1 = GETC();
- if(c1 == '=')
- return LRSHE;
- break;
- }
- if(c1 == '=')
- return LGE;
- break;
-
- case '<':
- c1 = GETC();
- if(c1 == '<') {
- c = LLSH;
- c1 = GETC();
- if(c1 == '=')
- return LLSHE;
- break;
- }
- if(c1 == '=')
- return LLE;
- break;
-
- case '=':
- c1 = GETC();
- if(c1 == '=')
- return LEQ;
- break;
-
- case '!':
- c1 = GETC();
- if(c1 == '=')
- return LNE;
- break;
-
- case '&':
- c1 = GETC();
- if(c1 == '&')
- return LANDAND;
- if(c1 == '=')
- return LANDE;
- break;
-
- case '|':
- c1 = GETC();
- if(c1 == '|')
- return LOROR;
- if(c1 == '=')
- return LORE;
- break;
-
- case '^':
- c1 = GETC();
- if(c1 == '=')
- return LXORE;
- break;
-
- default:
- return c;
- }
- peekc = c1;
- return c;
-
-talph:
- /*
- * cp is set to symb and some
- * prefix has been stored
- */
- for(;;) {
- if(c >= Runeself) {
- for(c1=0;;) {
- cp[c1++] = c;
- if(fullrune(cp, c1))
- break;
- c = GETC();
- }
- cp += c1;
- c = GETC();
- continue;
- }
- if(!isalnum(c) && c != '_')
- break;
- *cp++ = c;
- c = GETC();
- }
- *cp = 0;
- if(debug['L'])
- print("%L: %s\n", lineno, symb);
- peekc = c;
- s = lookup();
- if(s->macro) {
- newio();
- cp = ionext->b;
- macexpand(s, cp);
- pushio();
- ionext->link = iostack;
- iostack = ionext;
- fi.p = cp;
- fi.c = strlen(cp);
- if(peekc != IGN) {
- cp[fi.c++] = peekc;
- cp[fi.c] = 0;
- peekc = IGN;
- }
- goto l0;
- }
- yylval.sym = s;
- if(s->class == CTYPEDEF || s->class == CTYPESTR)
- return LTYPE;
- return s->lexical;
-
-tnum:
- c1 = 0;
- cp = symb;
- if(c != '0') {
- c1 |= Numdec;
- for(;;) {
- *cp++ = c;
- c = GETC();
- if(isdigit(c))
- continue;
- goto dc;
- }
- }
- *cp++ = c;
- c = GETC();
- if(c == 'x' || c == 'X')
- for(;;) {
- *cp++ = c;
- c = GETC();
- if(isdigit(c))
- continue;
- if(c >= 'a' && c <= 'f')
- continue;
- if(c >= 'A' && c <= 'F')
- continue;
- if(cp == symb+2)
- yyerror("malformed hex constant");
- goto ncu;
- }
- if(c < '0' || c > '7')
- goto dc;
- for(;;) {
- if(c >= '0' && c <= '7') {
- *cp++ = c;
- c = GETC();
- continue;
- }
- goto ncu;
- }
-
-dc:
- if(c == '.')
- goto casedot;
- if(c == 'e' || c == 'E')
- goto casee;
-
-ncu:
- if((c == 'U' || c == 'u') && !(c1 & Numuns)) {
- c = GETC();
- c1 |= Numuns;
- goto ncu;
- }
- if((c == 'L' || c == 'l') && !(c1 & Numvlong)) {
- c = GETC();
- if(c1 & Numlong)
- c1 |= Numvlong;
- c1 |= Numlong;
- goto ncu;
- }
- *cp = 0;
- peekc = c;
- if(mpatov(symb, &yylval.vval))
- yyerror("overflow in constant");
-
- vv = yylval.vval;
- if(c1 & Numvlong) {
- if((c1 & Numuns) || convvtox(vv, TVLONG) < 0) {
- c = LUVLCONST;
- t = TUVLONG;
- goto nret;
- }
- c = LVLCONST;
- t = TVLONG;
- goto nret;
- }
- if(c1 & Numlong) {
- if((c1 & Numuns) || convvtox(vv, TLONG) < 0) {
- c = LULCONST;
- t = TULONG;
- goto nret;
- }
- c = LLCONST;
- t = TLONG;
- goto nret;
- }
- if((c1 & Numuns) || convvtox(vv, TINT) < 0) {
- c = LUCONST;
- t = TUINT;
- goto nret;
- }
- c = LCONST;
- t = TINT;
- goto nret;
-
-nret:
- yylval.vval = convvtox(vv, t);
- if(yylval.vval != vv){
- nearln = lineno;
- warn(Z, "truncated constant: %T %s", types[t], symb);
- }
- return c;
-
-casedot:
- for(;;) {
- *cp++ = c;
- c = GETC();
- if(!isdigit(c))
- break;
- }
- if(c != 'e' && c != 'E')
- goto caseout;
-
-casee:
- *cp++ = 'e';
- c = GETC();
- if(c == '+' || c == '-') {
- *cp++ = c;
- c = GETC();
- }
- if(!isdigit(c))
- yyerror("malformed fp constant exponent");
- while(isdigit(c)) {
- *cp++ = c;
- c = GETC();
- }
-
-caseout:
- if(c == 'L' || c == 'l') {
- c = GETC();
- c1 |= Numlong;
- } else
- if(c == 'F' || c == 'f') {
- c = GETC();
- c1 |= Numflt;
- }
- *cp = 0;
- peekc = c;
- yylval.dval = strtod(symb, nil);
- if(isInf(yylval.dval, 1) || isInf(yylval.dval, -1)) {
- yyerror("overflow in float constant");
- yylval.dval = 0;
- }
- if(c1 & Numflt)
- return LFCONST;
- return LDCONST;
-}
-
-/*
- * convert a string, s, to vlong in *v
- * return conversion overflow.
- * required syntax is [0[x]]d*
- */
-int
-mpatov(char *s, vlong *v)
-{
- vlong n, nn;
- int c;
-
- n = 0;
- c = *s;
- if(c == '0')
- goto oct;
- while(c = *s++) {
- if(c >= '0' && c <= '9')
- nn = n*10 + c-'0';
- else
- goto bad;
- if(n < 0 && nn >= 0)
- goto bad;
- n = nn;
- }
- goto out;
-
-oct:
- s++;
- c = *s;
- if(c == 'x' || c == 'X')
- goto hex;
- while(c = *s++) {
- if(c >= '0' && c <= '7')
- nn = n*8 + c-'0';
- else
- goto bad;
- if(n < 0 && nn >= 0)
- goto bad;
- n = nn;
- }
- goto out;
-
-hex:
- s++;
- while(c = *s++) {
- if(c >= '0' && c <= '9')
- c += 0-'0';
- else
- if(c >= 'a' && c <= 'f')
- c += 10-'a';
- else
- if(c >= 'A' && c <= 'F')
- c += 10-'A';
- else
- goto bad;
- nn = (uvlong)n*16 + c;
- if(n < 0 && nn >= 0)
- goto bad;
- n = nn;
- }
-out:
- *v = n;
- return 0;
-
-bad:
- *v = ~0;
- return 1;
-}
-
-int
-getc(void)
-{
- int c;
-
- if(peekc != IGN) {
- c = peekc;
- peekc = IGN;
- } else
- c = GETC();
- if(c == '\n')
- lineno++;
- if(c == EOF) {
- yyerror("End of file");
- errorexit();
- }
- return c;
-}
-
-int32
-getr(void)
-{
- int c, i;
- char str[UTFmax+1];
- Rune rune;
-
-
- c = getc();
- if(c < Runeself)
- return c;
- i = 0;
- str[i++] = c;
-
-loop:
- c = getc();
- str[i++] = c;
- if(!fullrune(str, i))
- goto loop;
- c = chartorune(&rune, str);
- if(rune == Runeerror && c == 1) {
- nearln = lineno;
- diag(Z, "illegal rune in string");
- for(c=0; c<i; c++)
- print(" %.2x", *(uchar*)(str+c));
- print("\n");
- }
- return rune;
-}
-
-int
-getnsc(void)
-{
- int c;
-
- if(peekc != IGN) {
- c = peekc;
- peekc = IGN;
- } else
- c = GETC();
- for(;;) {
- if(c >= Runeself || !isspace(c))
- return c;
- if(c == '\n') {
- lineno++;
- return c;
- }
- c = GETC();
- }
-}
-
-void
-unget(int c)
-{
-
- peekc = c;
- if(c == '\n')
- lineno--;
-}
-
-int32
-escchar(int32 e, int longflg, int escflg)
-{
- int32 c, l;
- int i;
-
-loop:
- c = getr();
- if(c == '\n') {
- yyerror("newline in string");
- return EOF;
- }
- if(c != '\\') {
- if(c == e)
- c = EOF;
- return c;
- }
- c = getr();
- if(c == 'x') {
- /*
- * note this is not ansi,
- * supposed to only accept 2 hex
- */
- i = 2;
- if(longflg)
- i = 6;
- l = 0;
- for(; i>0; i--) {
- c = getc();
- if(c >= '0' && c <= '9') {
- l = l*16 + c-'0';
- continue;
- }
- if(c >= 'a' && c <= 'f') {
- l = l*16 + c-'a' + 10;
- continue;
- }
- if(c >= 'A' && c <= 'F') {
- l = l*16 + c-'A' + 10;
- continue;
- }
- unget(c);
- break;
- }
- if(escflg)
- l |= ESC;
- return l;
- }
- if(c >= '0' && c <= '7') {
- /*
- * note this is not ansi,
- * supposed to only accept 3 oct
- */
- i = 2;
- if(longflg)
- i = 8;
- l = c - '0';
- for(; i>0; i--) {
- c = getc();
- if(c >= '0' && c <= '7') {
- l = l*8 + c-'0';
- continue;
- }
- unget(c);
- }
- if(escflg)
- l |= ESC;
- return l;
- }
- switch(c)
- {
- case '\n': goto loop;
- case 'n': return '\n';
- case 't': return '\t';
- case 'b': return '\b';
- case 'r': return '\r';
- case 'f': return '\f';
- case 'a': return '\a';
- case 'v': return '\v';
- }
- return c;
-}
-
-struct
-{
- char *name;
- ushort lexical;
- ushort type;
-} itab[] =
-{
- "auto", LAUTO, 0,
- "break", LBREAK, 0,
- "case", LCASE, 0,
- "char", LCHAR, TCHAR,
- "const", LCONSTNT, 0,
- "continue", LCONTINUE, 0,
- "default", LDEFAULT, 0,
- "do", LDO, 0,
- "double", LDOUBLE, TDOUBLE,
- "else", LELSE, 0,
- "enum", LENUM, 0,
- "extern", LEXTERN, 0,
- "float", LFLOAT, TFLOAT,
- "for", LFOR, 0,
- "goto", LGOTO, 0,
- "if", LIF, 0,
- "inline", LINLINE, 0,
- "int", LINT, TINT,
- "long", LLONG, TLONG,
- "PREFETCH", LPREFETCH, 0,
- "register", LREGISTER, 0,
- "restrict", LRESTRICT, 0,
- "return", LRETURN, 0,
- "SET", LSET, 0,
- "short", LSHORT, TSHORT,
- "signed", LSIGNED, 0,
- "signof", LSIGNOF, 0,
- "sizeof", LSIZEOF, 0,
- "static", LSTATIC, 0,
- "struct", LSTRUCT, 0,
- "switch", LSWITCH, 0,
- "typedef", LTYPEDEF, 0,
- "typestr", LTYPESTR, 0,
- "union", LUNION, 0,
- "unsigned", LUNSIGNED, 0,
- "USED", LUSED, 0,
- "void", LVOID, TVOID,
- "volatile", LVOLATILE, 0,
- "while", LWHILE, 0,
- 0
-};
-
-void
-cinit(void)
-{
- Sym *s;
- int i;
- Type *t;
-
- nerrors = 0;
- lineno = 1;
- iostack = I;
- iofree = I;
- peekc = IGN;
- nhunk = 0;
-
- types[TXXX] = T;
- types[TCHAR] = typ(TCHAR, T);
- types[TUCHAR] = typ(TUCHAR, T);
- types[TSHORT] = typ(TSHORT, T);
- types[TUSHORT] = typ(TUSHORT, T);
- types[TINT] = typ(TINT, T);
- types[TUINT] = typ(TUINT, T);
- types[TLONG] = typ(TLONG, T);
- types[TULONG] = typ(TULONG, T);
- types[TVLONG] = typ(TVLONG, T);
- types[TUVLONG] = typ(TUVLONG, T);
- types[TFLOAT] = typ(TFLOAT, T);
- types[TDOUBLE] = typ(TDOUBLE, T);
- types[TVOID] = typ(TVOID, T);
- types[TENUM] = typ(TENUM, T);
- types[TFUNC] = typ(TFUNC, types[TINT]);
- types[TIND] = typ(TIND, types[TVOID]);
-
- for(i=0; i<NHASH; i++)
- hash[i] = S;
- for(i=0; itab[i].name; i++) {
- s = slookup(itab[i].name);
- s->lexical = itab[i].lexical;
- if(itab[i].type != 0)
- s->type = types[itab[i].type];
- }
- blockno = 0;
- autobn = 0;
- autoffset = 0;
-
- t = typ(TARRAY, types[TCHAR]);
- t->width = 0;
- symstring = slookup(".string");
- symstring->class = CSTATIC;
- symstring->dataflag = NOPTR;
- symstring->type = t;
-
- t = typ(TARRAY, types[TCHAR]);
- t->width = 0;
-
- nodproto = new(OPROTO, Z, Z);
- dclstack = D;
-
- fmtinstall('O', Oconv);
- fmtinstall('T', Tconv);
- fmtinstall('F', FNconv);
- fmtinstall('L', Lconv);
- fmtinstall('Q', Qconv);
- fmtinstall('|', VBconv);
- fmtinstall('U', Uconv);
- fmtinstall('B', Bconv);
-}
-
-int
-filbuf(void)
-{
- Io *i;
-
-loop:
- i = iostack;
- if(i == I)
- return EOF;
- if(i->f < 0)
- goto pop;
- fi.c = read(i->f, i->b, BUFSIZ) - 1;
- if(fi.c < 0) {
- close(i->f);
- linklinehist(ctxt, lineno, nil, 0);
- goto pop;
- }
- fi.p = i->b + 1;
- return i->b[0] & 0xff;
-
-pop:
- iostack = i->link;
- i->link = iofree;
- iofree = i;
- i = iostack;
- if(i == I)
- return EOF;
- fi.p = i->p;
- fi.c = i->c;
- if(--fi.c < 0)
- goto loop;
- return *fi.p++ & 0xff;
-}
-
-int
-Oconv(Fmt *fp)
-{
- int a;
-
- a = va_arg(fp->args, int);
- if(a < OXXX || a > OEND)
- return fmtprint(fp, "***badO %d***", a);
-
- return fmtstrcpy(fp, onames[a]);
-}
-
-int
-Lconv(Fmt *fp)
-{
- return linklinefmt(ctxt, fp);
-}
-
-int
-Tconv(Fmt *fp)
-{
- char str[STRINGSZ+20], s[STRINGSZ+20];
- Type *t, *t1;
- int et;
- int32 n;
-
- str[0] = 0;
- for(t = va_arg(fp->args, Type*); t != T; t = t->link) {
- et = t->etype;
- if(str[0])
- strcat(str, " ");
- if(t->garb&~GINCOMPLETE) {
- sprint(s, "%s ", gnames[t->garb&~GINCOMPLETE]);
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, s);
- }
- sprint(s, "%s", tnames[et]);
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, s);
- if(et == TFUNC && (t1 = t->down)) {
- sprint(s, "(%T", t1);
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, s);
- while(t1 = t1->down) {
- sprint(s, ", %T", t1);
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, s);
- }
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, ")");
- }
- if(et == TARRAY) {
- n = t->width;
- if(t->link && t->link->width)
- n /= t->link->width;
- sprint(s, "[%d]", n);
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, s);
- }
- if(t->nbits) {
- sprint(s, " %d:%d", t->shift, t->nbits);
- if(strlen(str) + strlen(s) < STRINGSZ)
- strcat(str, s);
- }
- if(typesu[et]) {
- if(t->tag) {
- strcat(str, " ");
- if(strlen(str) + strlen(t->tag->name) < STRINGSZ)
- strcat(str, t->tag->name);
- } else
- strcat(str, " {}");
- break;
- }
- }
- return fmtstrcpy(fp, str);
-}
-
-int
-FNconv(Fmt *fp)
-{
- char *str;
- Node *n;
-
- n = va_arg(fp->args, Node*);
- str = "<indirect>";
- if(n != Z && (n->op == ONAME || n->op == ODOT || n->op == OELEM))
- str = n->sym->name;
- return fmtstrcpy(fp, str);
-}
-
-int
-Qconv(Fmt *fp)
-{
- char str[STRINGSZ+20], *s;
- int32 b;
- int i;
-
- str[0] = 0;
- for(b = va_arg(fp->args, int32); b;) {
- i = bitno(b);
- if(str[0])
- strcat(str, " ");
- s = qnames[i];
- if(strlen(str) + strlen(s) >= STRINGSZ)
- break;
- strcat(str, s);
- b &= ~(1L << i);
- }
- return fmtstrcpy(fp, str);
-}
-
-int
-VBconv(Fmt *fp)
-{
- char str[STRINGSZ];
- int i, n, t, pc;
-
- n = va_arg(fp->args, int);
- pc = 0; /* BUG: was printcol */
- i = 0;
- while(pc < n) {
- t = (pc+4) & ~3;
- if(t <= n) {
- str[i++] = '\t';
- pc = t;
- continue;
- }
- str[i++] = ' ';
- pc++;
- }
- str[i] = 0;
-
- return fmtstrcpy(fp, str);
-}
-
-int
-Bconv(Fmt *fp)
-{
- char str[STRINGSZ], ss[STRINGSZ], *s;
- Bits bits;
- int i;
-
- str[0] = 0;
- bits = va_arg(fp->args, Bits);
- while(bany(&bits)) {
- i = bnum(bits);
- if(str[0])
- strcat(str, " ");
- if(var[i].sym == nil) {
- sprint(ss, "$%lld", var[i].offset);
- s = ss;
- } else
- s = var[i].sym->name;
- if(strlen(str) + strlen(s) + 1 >= STRINGSZ)
- break;
- strcat(str, s);
- bits.b[i/32] &= ~(1L << (i%32));
- }
- return fmtstrcpy(fp, str);
-}
-
-void
-setinclude(char *p)
-{
- int i;
-
- if(*p != 0) {
- for(i=1; i < ninclude; i++)
- if(strcmp(p, include[i]) == 0)
- return;
-
- if(ninclude%8 == 0)
- include = allocn(include, ninclude*sizeof(char *),
- 8*sizeof(char *));
- include[ninclude++] = p;
- }
-}
-
-void*
-alloc(int32 n)
-{
- void *p;
-
- p = malloc(n);
- if(p == nil) {
- print("alloc out of mem\n");
- exits("alloc: out of mem");
- }
- memset(p, 0, n);
- return p;
-}
-
-void*
-allocn(void *p, int32 n, int32 d)
-{
- if(p == nil)
- return alloc(n+d);
- p = realloc(p, n+d);
- if(p == nil) {
- print("allocn out of mem\n");
- exits("allocn: out of mem");
- }
- if(d > 0)
- memset((char*)p+n, 0, d);
- return p;
-}
-
-void
-ensuresymb(int32 n)
-{
- if(symb == nil) {
- symb = alloc(NSYMB+1);
- nsymb = NSYMB;
- }
-
- if(n > nsymb) {
- symb = allocn(symb, nsymb, n+1-nsymb);
- nsymb = n;
- }
-}
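
lookup() in the lex.c hunk deleted above canonicalizes symbol names before hashing: a leading · (U+00B7) gets a "" package-qualifier prefix, and the UTF-8 bytes of · and ∕ (U+2215) are rewritten in place to '.' and '/'. A standalone sketch of just that in-place byte rewrite, assuming a writable buffer and the standard C library:

#include <stdio.h>

/*
 * Rewrite UTF-8 middle dot (U+00B7) to '.' and division slash
 * (U+2215) to '/' in place, as the deleted lookup() does.
 */
static void
normsym(char *symb)
{
	char *r, *w;

	for(r = w = symb; *r; r++) {
		if((unsigned char)r[0] == 0xc2 && (unsigned char)r[1] == 0xb7) {
			*w++ = '.';
			r++;
		} else if((unsigned char)r[0] == 0xe2 && (unsigned char)r[1] == 0x88 &&
		          (unsigned char)r[2] == 0x95) {
			*w++ = '/';
			r += 2;
		} else
			*w++ = *r;
	}
	*w = '\0';
}

int
main(void)
{
	char name[] = "runtime\xc2\xb7memmove";

	normsym(name);
	printf("%s\n", name);	/* runtime.memmove */
	return 0;
}
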
diff --git a/src/cmd/cc/mac.c b/src/cmd/cc/mac.c
deleted file mode 100644
index b969662ae..000000000
--- a/src/cmd/cc/mac.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// Inferno utils/cc/mac.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/mac.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-#include "macbody"
diff --git a/src/cmd/cc/omachcap.c b/src/cmd/cc/omachcap.c
deleted file mode 100644
index f8fc1d88b..000000000
--- a/src/cmd/cc/omachcap.c
+++ /dev/null
@@ -1,40 +0,0 @@
-// Inferno utils/cc/machcap.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/machcap.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-/* default, like old cc */
-int
-machcap(Node *n)
-{
- USED(n);
- return 0;
-}
diff --git a/src/cmd/cc/pgen.c b/src/cmd/cc/pgen.c
deleted file mode 100644
index 54cf0c5e1..000000000
--- a/src/cmd/cc/pgen.c
+++ /dev/null
@@ -1,622 +0,0 @@
-// Inferno utils/6c/sgen.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/sgen.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-#include "../../runtime/funcdata.h"
-
-int
-hasdotdotdot(Type *t)
-{
- for(t=t->down; t!=T; t=t->down)
- if(t->etype == TDOT)
- return 1;
- return 0;
-}
-
-vlong
-argsize(int doret)
-{
- Type *t;
- int32 s;
-
-//print("t=%T\n", thisfn);
- s = 0;
- if(hasdotdotdot(thisfn))
- s = align(s, thisfn->link, Aarg0, nil);
- for(t=thisfn->down; t!=T; t=t->down) {
- switch(t->etype) {
- case TVOID:
- break;
- case TDOT:
- if((textflag & NOSPLIT) == 0)
- yyerror("function takes ... without textflag NOSPLIT");
- return ArgsSizeUnknown;
- default:
- s = align(s, t, Aarg1, nil);
- s = align(s, t, Aarg2, nil);
- break;
- }
-//print(" %d %T\n", s, t);
- }
- if(thechar == '6' || thechar == '9')
- s = (s+7) & ~7;
- else
- s = (s+3) & ~3;
- if(doret && thisfn->link->etype != TVOID) {
- s = align(s, thisfn->link, Aarg1, nil);
- s = align(s, thisfn->link, Aarg2, nil);
- if(thechar == '6' || thechar == '9')
- s = (s+7) & ~7;
- else
- s = (s+3) & ~3;
- }
- return s;
-}
-
-void
-codgen(Node *n, Node *nn)
-{
- Prog *sp;
- Node *n1, nod, nod1;
-
- cursafe = 0;
- curarg = 0;
- maxargsafe = 0;
-
- /*
- * isolate name
- */
- for(n1 = nn;; n1 = n1->left) {
- if(n1 == Z) {
- diag(nn, "can't find function name");
- return;
- }
- if(n1->op == ONAME)
- break;
- }
- nearln = nn->lineno;
-
- p = gtext(n1->sym, stkoff);
- p->from.sym->cfunc = 1;
- sp = p;
-
- /*
- * isolate first argument
- */
- if(REGARG >= 0) {
- if(typesuv[thisfn->link->etype]) {
- nod1 = *nodret->left;
- nodreg(&nod, &nod1, REGARG);
- gmove(&nod, &nod1);
- } else
- if(firstarg && typechlp[firstargtype->etype]) {
- nod1 = *nodret->left;
- nod1.sym = firstarg;
- nod1.type = firstargtype;
- nod1.xoffset = align(0, firstargtype, Aarg1, nil);
- nod1.etype = firstargtype->etype;
- nodreg(&nod, &nod1, REGARG);
- gmove(&nod, &nod1);
- }
- }
-
- canreach = 1;
- warnreach = 1;
- gen(n);
- if(canreach && thisfn->link->etype != TVOID)
- diag(Z, "no return at end of function: %s", n1->sym->name);
- noretval(3);
- gbranch(ORETURN);
-
- if(!debug['N'] || debug['R'] || debug['P'])
- regopt(sp);
-
- if(thechar=='6' || thechar=='7') /* [sic] */
- maxargsafe = xround(maxargsafe, 8);
- sp->to.offset += maxargsafe;
-}
-
-void
-supgen(Node *n)
-{
- int owarn;
- long spc;
- Prog *sp;
-
- if(n == Z)
- return;
- suppress++;
- owarn = warnreach;
- warnreach = 0;
- spc = pc;
- sp = lastp;
- gen(n);
- lastp = sp;
- pc = spc;
- sp->link = nil;
- suppress--;
- warnreach = owarn;
-}
-
-void
-gen(Node *n)
-{
- Node *l, nod, nod1;
- Prog *sp, *spc, *spb;
- Case *cn;
- long sbc, scc;
- int snbreak, sncontin;
- int f, o, oldreach;
-
-loop:
- if(n == Z)
- return;
- nearln = n->lineno;
- o = n->op;
- if(debug['G'])
- if(o != OLIST)
- print("%L %O\n", nearln, o);
-
- if(!canreach) {
- switch(o) {
- case OLABEL:
- case OCASE:
- case OLIST:
- case OBREAK:
- case OFOR:
- case OWHILE:
- case ODWHILE:
- /* all handled specially - see switch body below */
- break;
- default:
- if(warnreach) {
- warn(n, "unreachable code %O", o);
- warnreach = 0;
- }
- }
- }
-
- switch(o) {
-
- default:
- complex(n);
- cgen(n, Z);
- break;
-
- case OLIST:
- gen(n->left);
-
- rloop:
- n = n->right;
- goto loop;
-
- case ORETURN:
- canreach = 0;
- warnreach = !suppress;
- complex(n);
- if(n->type == T)
- break;
- l = n->left;
- if(l == Z) {
- noretval(3);
- gbranch(ORETURN);
- break;
- }
- if(typecmplx[n->type->etype] && !hasdotdotdot(thisfn)) {
- regret(&nod, n, thisfn, 2);
- sugen(l, &nod, n->type->width);
- noretval(3);
- gbranch(ORETURN);
- break;
- }
- if(typecmplx[n->type->etype]) {
- sugen(l, nodret, n->type->width);
- noretval(3);
- gbranch(ORETURN);
- break;
- }
- regret(&nod1, n, thisfn, 2);
- nod = nod1;
- if(nod.op != OREGISTER)
- regalloc(&nod, n, Z);
- cgen(l, &nod);
- if(nod1.op != OREGISTER)
- gmove(&nod, &nod1);
- regfree(&nod);
- if(typefd[n->type->etype])
- noretval(1);
- else
- noretval(2);
- gbranch(ORETURN);
- break;
-
- case OLABEL:
- canreach = 1;
- l = n->left;
- if(l) {
- l->pc = pc;
- if(l->label)
- patch(l->label, pc);
- }
- gbranch(OGOTO); /* prevent self reference in reg */
- patch(p, pc);
- goto rloop;
-
- case OGOTO:
- canreach = 0;
- warnreach = !suppress;
- n = n->left;
- if(n == Z)
- return;
- if(n->complex == 0) {
- diag(Z, "label undefined: %s", n->sym->name);
- return;
- }
- if(suppress)
- return;
- gbranch(OGOTO);
- if(n->pc) {
- patch(p, n->pc);
- return;
- }
- if(n->label)
- patch(n->label, pc-1);
- n->label = p;
- return;
-
- case OCASE:
- canreach = 1;
- l = n->left;
- if(cases == C)
- diag(n, "case/default outside a switch");
- if(l == Z) {
- newcase();
- cases->val = 0;
- cases->def = 1;
- cases->label = pc;
- cases->isv = 0;
- goto rloop;
- }
- complex(l);
- if(l->type == T)
- goto rloop;
- if(l->op == OCONST)
- if(typeword[l->type->etype] && l->type->etype != TIND) {
- newcase();
- cases->val = l->vconst;
- cases->def = 0;
- cases->label = pc;
- cases->isv = typev[l->type->etype];
- goto rloop;
- }
- diag(n, "case expression must be integer constant");
- goto rloop;
-
- case OSWITCH:
- l = n->left;
- complex(l);
- if(l->type == T)
- break;
- if(!typechlvp[l->type->etype] || l->type->etype == TIND) {
- diag(n, "switch expression must be integer");
- break;
- }
-
- gbranch(OGOTO); /* entry */
- sp = p;
-
- cn = cases;
- cases = C;
- newcase();
-
- sbc = breakpc;
- breakpc = pc;
- snbreak = nbreak;
- nbreak = 0;
- gbranch(OGOTO);
- spb = p;
-
- gen(n->right); /* body */
- if(canreach){
- gbranch(OGOTO);
- patch(p, breakpc);
- nbreak++;
- }
-
- patch(sp, pc);
- doswit(l);
- patch(spb, pc);
-
- cases = cn;
- breakpc = sbc;
- canreach = nbreak!=0;
- if(canreach == 0)
- warnreach = !suppress;
- nbreak = snbreak;
- break;
-
- case OWHILE:
- case ODWHILE:
- l = n->left;
- gbranch(OGOTO); /* entry */
- sp = p;
-
- scc = continpc;
- continpc = pc;
- gbranch(OGOTO);
- spc = p;
-
- sbc = breakpc;
- breakpc = pc;
- snbreak = nbreak;
- nbreak = 0;
- gbranch(OGOTO);
- spb = p;
-
- patch(spc, pc);
- if(n->op == OWHILE)
- patch(sp, pc);
- bcomplex(l, Z); /* test */
- patch(p, breakpc);
- if(l->op != OCONST || vconst(l) == 0)
- nbreak++;
-
- if(n->op == ODWHILE)
- patch(sp, pc);
- gen(n->right); /* body */
- gbranch(OGOTO);
- patch(p, continpc);
-
- patch(spb, pc);
- continpc = scc;
- breakpc = sbc;
- canreach = nbreak!=0;
- if(canreach == 0)
- warnreach = !suppress;
- nbreak = snbreak;
- break;
-
- case OFOR:
- l = n->left;
- if(!canreach && l->right->left && warnreach) {
- warn(n, "unreachable code FOR");
- warnreach = 0;
- }
- gen(l->right->left); /* init */
- gbranch(OGOTO); /* entry */
- sp = p;
-
- /*
- * if there are no incoming labels in the
- * body and the top's not reachable, warn
- */
- if(!canreach && warnreach && deadheads(n)) {
- warn(n, "unreachable code %O", o);
- warnreach = 0;
- }
-
- scc = continpc;
- continpc = pc;
- gbranch(OGOTO);
- spc = p;
-
- sbc = breakpc;
- breakpc = pc;
- snbreak = nbreak;
- nbreak = 0;
- sncontin = ncontin;
- ncontin = 0;
- gbranch(OGOTO);
- spb = p;
-
- patch(spc, pc);
- gen(l->right->right); /* inc */
- patch(sp, pc);
- if(l->left != Z) { /* test */
- bcomplex(l->left, Z);
- patch(p, breakpc);
- if(l->left->op != OCONST || vconst(l->left) == 0)
- nbreak++;
- }
- canreach = 1;
- gen(n->right); /* body */
- if(canreach){
- gbranch(OGOTO);
- patch(p, continpc);
- ncontin++;
- }
- if(!ncontin && l->right->right && warnreach) {
- warn(l->right->right, "unreachable FOR inc");
- warnreach = 0;
- }
-
- patch(spb, pc);
- continpc = scc;
- breakpc = sbc;
- canreach = nbreak!=0;
- if(canreach == 0)
- warnreach = !suppress;
- nbreak = snbreak;
- ncontin = sncontin;
- break;
-
- case OCONTINUE:
- if(continpc < 0) {
- diag(n, "continue not in a loop");
- break;
- }
- gbranch(OGOTO);
- patch(p, continpc);
- ncontin++;
- canreach = 0;
- warnreach = !suppress;
- break;
-
- case OBREAK:
- if(breakpc < 0) {
- diag(n, "break not in a loop");
- break;
- }
- /*
- * Don't complain about unreachable break statements.
- * There are breaks hidden in yacc's output and some people
- * write return; break; in their switch statements out of habit.
- * However, don't confuse the analysis by inserting an
- * unreachable reference to breakpc either.
- */
- if(!canreach)
- break;
- gbranch(OGOTO);
- patch(p, breakpc);
- nbreak++;
- canreach = 0;
- warnreach = !suppress;
- break;
-
- case OIF:
- l = n->left;
- if(bcomplex(l, n->right)) {
- if(typefd[l->type->etype])
- f = !l->fconst;
- else
- f = !l->vconst;
- if(debug['c'])
- print("%L const if %s\n", nearln, f ? "false" : "true");
- if(f) {
- canreach = 1;
- supgen(n->right->left);
- oldreach = canreach;
- canreach = 1;
- gen(n->right->right);
- /*
- * treat constant ifs as regular ifs for
- * reachability warnings.
- */
- if(!canreach && oldreach && debug['w'] < 2)
- warnreach = 0;
- }
- else {
- canreach = 1;
- gen(n->right->left);
- oldreach = canreach;
- canreach = 1;
- supgen(n->right->right);
- /*
- * treat constant ifs as regular ifs for
- * reachability warnings.
- */
- if(!oldreach && canreach && debug['w'] < 2)
- warnreach = 0;
- canreach = oldreach;
- }
- }
- else {
- sp = p;
- canreach = 1;
- if(n->right->left != Z)
- gen(n->right->left);
- oldreach = canreach;
- canreach = 1;
- if(n->right->right != Z) {
- gbranch(OGOTO);
- patch(sp, pc);
- sp = p;
- gen(n->right->right);
- }
- patch(sp, pc);
- canreach = canreach || oldreach;
- if(canreach == 0)
- warnreach = !suppress;
- }
- break;
-
- case OSET:
- case OUSED:
- case OPREFETCH:
- usedset(n->left, o);
- break;
- }
-}
-
-void
-usedset(Node *n, int o)
-{
- if(n->op == OLIST) {
- usedset(n->left, o);
- usedset(n->right, o);
- return;
- }
- complex(n);
- if(o == OPREFETCH) {
- gprefetch(n);
- return;
- }
- switch(n->op) {
- case OADDR: /* volatile */
- gins(ANOP, n, Z);
- break;
- case ONAME:
- if(o == OSET)
- gins(ANOP, Z, n);
- else
- gins(ANOP, n, Z);
- break;
- }
-}
-
-int
-bcomplex(Node *n, Node *c)
-{
- Node *b, nod;
-
- complex(n);
- if(n->type != T)
- if(tcompat(n, T, n->type, tnot))
- n->type = T;
- if(n->type == T) {
- gbranch(OGOTO);
- return 0;
- }
- if(c != Z && n->op == OCONST && deadheads(c))
- return 1;
- if(typev[n->type->etype] && machcap(Z)) {
- b = &nod;
- b->op = ONE;
- b->left = n;
- b->right = new(0, Z, Z);
- *b->right = *nodconst(0);
- b->right->type = n->type;
- b->type = types[TLONG];
- n = b;
- }
- bool64(n);
- boolgen(n, 1, Z);
- return 0;
-}
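
argsize in the pgen.c hunk deleted above accumulates aligned argument widths and rounds the running size up to the target word: 8 bytes on the 64-bit ports (thechar '6' or '9'), 4 otherwise, via the usual power-of-two mask. A minimal sketch of that rounding step, with the word size passed as an illustrative parameter:

#include <stdio.h>

/*
 * Round s up to a multiple of the power-of-two word size,
 * as argsize does with (s+7)&~7 or (s+3)&~3.
 */
static long
roundup(long s, long wordsize)
{
	return (s + wordsize - 1) & ~(wordsize - 1);
}

int
main(void)
{
	printf("%ld %ld\n", roundup(13, 8), roundup(13, 4));	/* 16 16 */
	return 0;
}
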
diff --git a/src/cmd/cc/pswt.c b/src/cmd/cc/pswt.c
deleted file mode 100644
index bae57c64d..000000000
--- a/src/cmd/cc/pswt.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// Inferno utils/6c/swt.c
-// http://code.google.com/p/inferno-os/source/browse/utils/6c/swt.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include "gc.h"
-
-int
-swcmp(const void *a1, const void *a2)
-{
- C1 *p1, *p2;
-
- p1 = (C1*)a1;
- p2 = (C1*)a2;
- if(p1->val < p2->val)
- return -1;
- return p1->val > p2->val;
-}
-
-void
-doswit(Node *n)
-{
- Case *c;
- C1 *q, *iq;
- int32 def, nc, i, isv;
-
- def = 0;
- nc = 0;
- isv = 0;
- for(c = cases; c->link != C; c = c->link) {
- if(c->def) {
- if(def)
- diag(n, "more than one default in switch");
- def = c->label;
- continue;
- }
- isv |= c->isv;
- nc++;
- }
- if(isv && !typev[n->type->etype])
- warn(n, "32-bit switch expression with 64-bit case constant");
-
- iq = alloc(nc*sizeof(C1));
- q = iq;
- for(c = cases; c->link != C; c = c->link) {
- if(c->def)
- continue;
- q->label = c->label;
- if(isv)
- q->val = c->val;
- else
- q->val = (int32)c->val; /* cast ensures correct value for 32-bit switch on 64-bit architecture */
- q++;
- }
- qsort(iq, nc, sizeof(C1), swcmp);
- if(debug['W'])
- for(i=0; i<nc; i++)
- print("case %2d: = %.8llux\n", i, (vlong)iq[i].val);
- for(i=0; i<nc-1; i++)
- if(iq[i].val == iq[i+1].val)
- diag(n, "duplicate cases in switch %lld", (vlong)iq[i].val);
- if(def == 0) {
- def = breakpc;
- nbreak++;
- }
- swit1(iq, nc, def, n);
-}
-
-void
-newcase(void)
-{
- Case *c;
-
- c = alloc(sizeof(*c));
- c->link = cases;
- cases = c;
-}
-
-int32
-outlstring(TRune *s, int32 n)
-{
- char buf[sizeof(TRune)];
- uint c;
- int i;
- int32 r;
-
- if(suppress)
- return nstring;
- while(nstring & (sizeof(TRune)-1))
- outstring("", 1);
- r = nstring;
- while(n > 0) {
- c = *s++;
- if(align(0, types[TCHAR], Aarg1, nil)) {
- for(i = 0; i < sizeof(TRune); i++)
- buf[i] = c>>(8*(sizeof(TRune) - i - 1));
- } else {
- for(i = 0; i < sizeof(TRune); i++)
- buf[i] = c>>(8*i);
- }
- outstring(buf, sizeof(TRune));
- n -= sizeof(TRune);
- }
- return r;
-}
-
-void
-nullwarn(Node *l, Node *r)
-{
- warn(Z, "result of operation not used");
- if(l != Z)
- cgen(l, Z);
- if(r != Z)
- cgen(r, Z);
-}
diff --git a/src/cmd/cc/scon.c b/src/cmd/cc/scon.c
deleted file mode 100644
index b0b909759..000000000
--- a/src/cmd/cc/scon.c
+++ /dev/null
@@ -1,640 +0,0 @@
-// Inferno utils/cc/scon.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/scon.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-static Node*
-acast(Type *t, Node *n)
-{
- if(n->type->etype != t->etype || n->op == OBIT) {
- n = new1(OCAST, n, Z);
- if(nocast(n->left->type, t))
- *n = *n->left;
- n->type = t;
- }
- return n;
-}
-
-
-void
-evconst(Node *n)
-{
- Node *l, *r;
- int et, isf;
- vlong v;
- double d;
-
- if(n == Z || n->type == T)
- return;
-
- et = n->type->etype;
- isf = typefd[et];
-
- l = n->left;
- r = n->right;
-
- d = 0;
- v = 0;
-
- switch(n->op) {
- default:
- return;
-
- case ONEG:
- if(isf)
- d = -l->fconst;
- else
- v = -l->vconst;
- break;
-
- case OCOM:
- v = ~l->vconst;
- break;
-
- case OCAST:
- if(et == TVOID)
- return;
- et = l->type->etype;
- if(isf) {
- if(typefd[et])
- d = l->fconst;
- else
- d = l->vconst;
- } else {
- if(typefd[et])
- v = l->fconst;
- else
- v = convvtox(l->vconst, n->type->etype);
- }
- break;
-
- case OCONST:
- break;
-
- case OADD:
- if(isf)
- d = l->fconst + r->fconst;
- else {
- v = l->vconst + r->vconst;
- }
- break;
-
- case OSUB:
- if(isf)
- d = l->fconst - r->fconst;
- else
- v = l->vconst - r->vconst;
- break;
-
- case OMUL:
- if(isf)
- d = l->fconst * r->fconst;
- else {
- v = l->vconst * r->vconst;
- }
- break;
-
- case OLMUL:
- v = (uvlong)l->vconst * (uvlong)r->vconst;
- break;
-
-
- case ODIV:
- if(vconst(r) == 0) {
- warn(n, "divide by zero");
- return;
- }
- if(isf)
- d = l->fconst / r->fconst;
- else
- v = l->vconst / r->vconst;
- break;
-
- case OLDIV:
- if(vconst(r) == 0) {
- warn(n, "divide by zero");
- return;
- }
- v = (uvlong)l->vconst / (uvlong)r->vconst;
- break;
-
- case OMOD:
- if(vconst(r) == 0) {
- warn(n, "modulo by zero");
- return;
- }
- v = l->vconst % r->vconst;
- break;
-
- case OLMOD:
- if(vconst(r) == 0) {
- warn(n, "modulo by zero");
- return;
- }
- v = (uvlong)l->vconst % (uvlong)r->vconst;
- break;
-
- case OAND:
- v = l->vconst & r->vconst;
- break;
-
- case OOR:
- v = l->vconst | r->vconst;
- break;
-
- case OXOR:
- v = l->vconst ^ r->vconst;
- break;
-
- case OLSHR:
- if(l->type->width != sizeof(uvlong))
- v = ((uvlong)l->vconst & 0xffffffffULL) >> r->vconst;
- else
- v = (uvlong)l->vconst >> r->vconst;
- break;
-
- case OASHR:
- v = l->vconst >> r->vconst;
- break;
-
- case OASHL:
- v = (uvlong)l->vconst << r->vconst;
- break;
-
- case OLO:
- v = (uvlong)l->vconst < (uvlong)r->vconst;
- break;
-
- case OLT:
- if(typefd[l->type->etype])
- v = l->fconst < r->fconst;
- else
- v = l->vconst < r->vconst;
- break;
-
- case OHI:
- v = (uvlong)l->vconst > (uvlong)r->vconst;
- break;
-
- case OGT:
- if(typefd[l->type->etype])
- v = l->fconst > r->fconst;
- else
- v = l->vconst > r->vconst;
- break;
-
- case OLS:
- v = (uvlong)l->vconst <= (uvlong)r->vconst;
- break;
-
- case OLE:
- if(typefd[l->type->etype])
- v = l->fconst <= r->fconst;
- else
- v = l->vconst <= r->vconst;
- break;
-
- case OHS:
- v = (uvlong)l->vconst >= (uvlong)r->vconst;
- break;
-
- case OGE:
- if(typefd[l->type->etype])
- v = l->fconst >= r->fconst;
- else
- v = l->vconst >= r->vconst;
- break;
-
- case OEQ:
- if(typefd[l->type->etype])
- v = l->fconst == r->fconst;
- else
- v = l->vconst == r->vconst;
- break;
-
- case ONE:
- if(typefd[l->type->etype])
- v = l->fconst != r->fconst;
- else
- v = l->vconst != r->vconst;
- break;
-
- case ONOT:
- if(typefd[l->type->etype])
- v = !l->fconst;
- else
- v = !l->vconst;
- break;
-
- case OANDAND:
- if(typefd[l->type->etype])
- v = l->fconst && r->fconst;
- else
- v = l->vconst && r->vconst;
- break;
-
- case OOROR:
- if(typefd[l->type->etype])
- v = l->fconst || r->fconst;
- else
- v = l->vconst || r->vconst;
- break;
- }
- if(isf) {
- n->fconst = d;
- } else {
- n->vconst = convvtox(v, n->type->etype);
- }
- n->oldop = n->op;
- n->op = OCONST;
-}
-
-void
-acom(Node *n)
-{
- Type *t;
- Node *l, *r;
- int i;
-
- switch(n->op)
- {
-
- case ONAME:
- case OCONST:
- case OSTRING:
- case OINDREG:
- case OREGISTER:
- return;
-
- case ONEG:
- l = n->left;
- if(addo(n) && addo(l))
- break;
- acom(l);
- return;
-
- case OADD:
- case OSUB:
- case OMUL:
- l = n->left;
- r = n->right;
- if(addo(n)) {
- if(addo(r))
- break;
- if(addo(l))
- break;
- }
- acom(l);
- acom(r);
- return;
-
- default:
- l = n->left;
- r = n->right;
- if(l != Z)
- acom(l);
- if(r != Z)
- acom(r);
- return;
- }
-
- /* bust terms out */
- t = n->type;
- term[0].mult = 0;
- term[0].node = Z;
- nterm = 1;
- acom1(1, n);
- if(debug['m'])
- for(i=0; i<nterm; i++) {
- print("%d %3lld ", i, term[i].mult);
- prtree1(term[i].node, 1, 0);
- }
- if(nterm < NTERM)
- acom2(n, t);
- n->type = t;
-}
-
-int
-acomcmp1(const void *a1, const void *a2)
-{
- vlong c1, c2;
- Term *t1, *t2;
-
- t1 = (Term*)a1;
- t2 = (Term*)a2;
- c1 = t1->mult;
- if(c1 < 0)
- c1 = -c1;
- c2 = t2->mult;
- if(c2 < 0)
- c2 = -c2;
- if(c1 > c2)
- return 1;
- if(c1 < c2)
- return -1;
- c1 = 1;
- if(t1->mult < 0)
- c1 = 0;
- c2 = 1;
- if(t2->mult < 0)
- c2 = 0;
- if(c2 -= c1)
- return c2;
- if(t2 > t1)
- return 1;
- return -1;
-}
-
-int
-acomcmp2(const void *a1, const void *a2)
-{
- vlong c1, c2;
- Term *t1, *t2;
-
- t1 = (Term*)a1;
- t2 = (Term*)a2;
- c1 = t1->mult;
- c2 = t2->mult;
- if(c1 > c2)
- return 1;
- if(c1 < c2)
- return -1;
- if(t2 > t1)
- return 1;
- return -1;
-}
-
-void
-acom2(Node *n, Type *t)
-{
- Node *l, *r;
- Term trm[NTERM];
- int et, nt, i, j;
- vlong c1, c2;
-
- /*
- * copy into automatic
- */
- c2 = 0;
- nt = nterm;
- for(i=0; i<nt; i++)
- trm[i] = term[i];
- /*
- * recur on subtrees
- */
- j = 0;
- for(i=1; i<nt; i++) {
- c1 = trm[i].mult;
- if(c1 == 0)
- continue;
- l = trm[i].node;
- if(l != Z) {
- j = 1;
- acom(l);
- }
- }
- c1 = trm[0].mult;
- if(j == 0) {
- n->oldop = n->op;
- n->op = OCONST;
- n->vconst = c1;
- return;
- }
- et = t->etype;
-
- /*
- * prepare constant term,
- * combine it with an addressing term
- */
- if(c1 != 0) {
- l = new1(OCONST, Z, Z);
- l->type = t;
- l->vconst = c1;
- trm[0].mult = 1;
- for(i=1; i<nt; i++) {
- if(trm[i].mult != 1)
- continue;
- r = trm[i].node;
- if(r->op != OADDR)
- continue;
- r->type = t;
- l = new1(OADD, r, l);
- l->type = t;
- trm[i].mult = 0;
- break;
- }
- trm[0].node = l;
- }
- /*
- * look for factorable terms
- * c1*i + c1*c2*j -> c1*(i + c2*j)
- */
- qsort(trm+1, nt-1, sizeof(trm[0]), acomcmp1);
- for(i=nt-1; i>=0; i--) {
- c1 = trm[i].mult;
- if(c1 < 0)
- c1 = -c1;
- if(c1 <= 1)
- continue;
- for(j=i+1; j<nt; j++) {
- c2 = trm[j].mult;
- if(c2 < 0)
- c2 = -c2;
- if(c2 <= 1)
- continue;
- if(c2 % c1)
- continue;
- r = trm[j].node;
- if(r->type->etype != et)
- r = acast(t, r);
- c2 = trm[j].mult/trm[i].mult;
- if(c2 != 1 && c2 != -1) {
- r = new1(OMUL, r, new(OCONST, Z, Z));
- r->type = t;
- r->right->type = t;
- r->right->vconst = c2;
- }
- l = trm[i].node;
- if(l->type->etype != et)
- l = acast(t, l);
- r = new1(OADD, l, r);
- r->type = t;
- if(c2 == -1)
- r->op = OSUB;
- trm[i].node = r;
- trm[j].mult = 0;
- }
- }
- if(debug['m']) {
- print("\n");
- for(i=0; i<nt; i++) {
- print("%d %3lld ", i, trm[i].mult);
- prtree1(trm[i].node, 1, 0);
- }
- }
-
- /*
- * put it all back together
- */
- qsort(trm+1, nt-1, sizeof(trm[0]), acomcmp2);
- l = Z;
- for(i=nt-1; i>=0; i--) {
- c1 = trm[i].mult;
- if(c1 == 0)
- continue;
- r = trm[i].node;
- if(r->type->etype != et || r->op == OBIT)
- r = acast(t, r);
- if(c1 != 1 && c1 != -1) {
- r = new1(OMUL, r, new(OCONST, Z, Z));
- r->type = t;
- r->right->type = t;
- if(c1 < 0) {
- r->right->vconst = -c1;
- c1 = -1;
- } else {
- r->right->vconst = c1;
- c1 = 1;
- }
- }
- if(l == Z) {
- l = r;
- c2 = c1;
- continue;
- }
- if(c1 < 0)
- if(c2 < 0)
- l = new1(OADD, l, r);
- else
- l = new1(OSUB, l, r);
- else
- if(c2 < 0) {
- l = new1(OSUB, r, l);
- c2 = 1;
- } else
- l = new1(OADD, l, r);
- l->type = t;
- }
- if(c2 < 0) {
- r = new1(OCONST, 0, 0);
- r->vconst = 0;
- r->type = t;
- l = new1(OSUB, r, l);
- l->type = t;
- }
- *n = *l;
-}
-
-void
-acom1(vlong v, Node *n)
-{
- Node *l, *r;
-
- if(v == 0 || nterm >= NTERM)
- return;
- if(!addo(n)) {
- if(n->op == OCONST)
- if(!typefd[n->type->etype]) {
- term[0].mult += v*n->vconst;
- return;
- }
- term[nterm].mult = v;
- term[nterm].node = n;
- nterm++;
- return;
- }
- switch(n->op) {
-
- case OCAST:
- acom1(v, n->left);
- break;
-
- case ONEG:
- acom1(-v, n->left);
- break;
-
- case OADD:
- acom1(v, n->left);
- acom1(v, n->right);
- break;
-
- case OSUB:
- acom1(v, n->left);
- acom1(-v, n->right);
- break;
-
- case OMUL:
- l = n->left;
- r = n->right;
- if(l->op == OCONST)
- if(!typefd[n->type->etype]) {
- acom1(v*l->vconst, r);
- break;
- }
- if(r->op == OCONST)
- if(!typefd[n->type->etype]) {
- acom1(v*r->vconst, l);
- break;
- }
- break;
-
- default:
- diag(n, "not addo");
- }
-}
-
-int
-addo(Node *n)
-{
-
- if(n != Z)
- if(!typefd[n->type->etype])
- if(!typev[n->type->etype] || ewidth[TVLONG] == ewidth[TIND])
- switch(n->op) {
-
- case OCAST:
- if(nilcast(n->left->type, n->type))
- return 1;
- break;
-
- case ONEG:
- case OADD:
- case OSUB:
- return 1;
-
- case OMUL:
- if(n->left->op == OCONST)
- return 1;
- if(n->right->op == OCONST)
- return 1;
- }
- return 0;
-}
diff --git a/src/cmd/cc/sub.c b/src/cmd/cc/sub.c
deleted file mode 100644
index 94c11d021..000000000
--- a/src/cmd/cc/sub.c
+++ /dev/null
@@ -1,2068 +0,0 @@
-// Inferno utils/cc/sub.c
-// http://code.google.com/p/inferno-os/source/browse/utils/cc/sub.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// Portions Copyright © 1997-1999 Vita Nuova Limited
-// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// Portions Copyright © 2004,2006 Bruce Ellis
-// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-#include <u.h>
-#include "cc.h"
-
-Node*
-new(int t, Node *l, Node *r)
-{
- Node *n;
-
- n = alloc(sizeof(*n));
- n->op = t;
- n->left = l;
- n->right = r;
- if(l && t != OGOTO)
- n->lineno = l->lineno;
- else if(r)
- n->lineno = r->lineno;
- else
- n->lineno = lineno;
- newflag = 1;
- return n;
-}
-
-Node*
-new1(int o, Node *l, Node *r)
-{
- Node *n;
-
- n = new(o, l, r);
- n->lineno = nearln;
- return n;
-}
-
-void
-prtree(Node *n, char *s)
-{
-
- print(" == %s ==\n", s);
- prtree1(n, 0, 0);
- print("\n");
-}
-
-void
-prtree1(Node *n, int d, int f)
-{
- int i;
-
- if(f)
- for(i=0; i<d; i++)
- print(" ");
- if(n == Z) {
- print("Z\n");
- return;
- }
- if(n->op == OLIST) {
- prtree1(n->left, d, 0);
- prtree1(n->right, d, 1);
- return;
- }
- d++;
- print("%O", n->op);
- i = 3;
- switch(n->op)
- {
- case ONAME:
- print(" \"%F\"", n);
- print(" %d", n->xoffset);
- i = 0;
- break;
-
- case OINDREG:
- print(" %d(R%d)", n->xoffset, n->reg);
- i = 0;
- break;
-
- case OREGISTER:
- if(n->xoffset)
- print(" %d+R%d", n->xoffset, n->reg);
- else
- print(" R%d", n->reg);
- i = 0;
- break;
-
- case OSTRING:
- print(" \"%s\"", n->cstring);
- i = 0;
- break;
-
- case OLSTRING:
- if(sizeof(TRune) == sizeof(Rune))
- print(" \"%S\"", (Rune*)n->rstring);
- else
- print(" \"...\"");
- i = 0;
- break;
-
- case ODOT:
- case OELEM:
- print(" \"%F\"", n);
- break;
-
- case OCONST:
- if(typefd[n->type->etype])
- print(" \"%.8e\"", n->fconst);
- else
- print(" \"%lld\"", n->vconst);
- i = 0;
- break;
- }
- if(n->addable != 0)
- print(" <%d>", n->addable);
- if(n->type != T)
- print(" %T", n->type);
- if(n->complex != 0)
- print(" (%d)", n->complex);
- print(" %L\n", n->lineno);
- if(i & 2)
- prtree1(n->left, d, 1);
- if(i & 1)
- prtree1(n->right, d, 1);
-}
-
-Type*
-typ(int et, Type *d)
-{
- Type *t;
-
- t = alloc(sizeof(*t));
- t->etype = et;
- t->link = d;
- t->down = T;
- t->sym = S;
- if(et < NTYPE)
- t->width = ewidth[et];
- else
- t->width = -1; // for TDOT or TOLD in prototype
- t->offset = 0;
- t->shift = 0;
- t->nbits = 0;
- t->garb = 0;
- return t;
-}
-
-Type*
-copytyp(Type *t)
-{
- Type *nt;
-
- nt = typ(TXXX, T);
- *nt = *t;
- return nt;
-}
-
-Type*
-garbt(Type *t, int32 b)
-{
- Type *t1;
-
- if(b & BGARB) {
- t1 = copytyp(t);
- t1->garb = simpleg(b);
- return t1;
- }
- return t;
-}
-
-int
-simpleg(int32 b)
-{
-
- b &= BGARB;
- switch(b) {
- case BCONSTNT:
- return GCONSTNT;
- case BVOLATILE:
- return GVOLATILE;
- case BVOLATILE|BCONSTNT:
- return GCONSTNT|GVOLATILE;
- }
- return GXXX;
-}
-
-int
-simplec(int32 b)
-{
-
- b &= BCLASS;
- switch(b) {
- case 0:
- case BREGISTER:
- return CXXX;
- case BAUTO:
- case BAUTO|BREGISTER:
- return CAUTO;
- case BEXTERN:
- return CEXTERN;
- case BEXTERN|BREGISTER:
- return CEXREG;
- case BSTATIC:
- return CSTATIC;
- case BTYPEDEF:
- return CTYPEDEF;
- case BTYPESTR:
- return CTYPESTR;
- }
- diag(Z, "illegal combination of classes %Q", b);
- return CXXX;
-}
-
-Type*
-simplet(int32 b)
-{
-
- b &= ~BCLASS & ~BGARB;
- switch(b) {
- case BCHAR:
- case BCHAR|BSIGNED:
- return types[TCHAR];
-
- case BCHAR|BUNSIGNED:
- return types[TUCHAR];
-
- case BSHORT:
- case BSHORT|BINT:
- case BSHORT|BSIGNED:
- case BSHORT|BINT|BSIGNED:
- return types[TSHORT];
-
- case BUNSIGNED|BSHORT:
- case BUNSIGNED|BSHORT|BINT:
- return types[TUSHORT];
-
- case 0:
- case BINT:
- case BINT|BSIGNED:
- case BSIGNED:
- return types[TINT];
-
- case BUNSIGNED:
- case BUNSIGNED|BINT:
- return types[TUINT];
-
- case BLONG:
- case BLONG|BINT:
- case BLONG|BSIGNED:
- case BLONG|BINT|BSIGNED:
- return types[TLONG];
-
- case BUNSIGNED|BLONG:
- case BUNSIGNED|BLONG|BINT:
- return types[TULONG];
-
- case BVLONG|BLONG:
- case BVLONG|BLONG|BINT:
- case BVLONG|BLONG|BSIGNED:
- case BVLONG|BLONG|BINT|BSIGNED:
- return types[TVLONG];
-
- case BVLONG|BLONG|BUNSIGNED:
- case BVLONG|BLONG|BINT|BUNSIGNED:
- return types[TUVLONG];
-
- case BFLOAT:
- return types[TFLOAT];
-
- case BDOUBLE:
- case BDOUBLE|BLONG:
- case BFLOAT|BLONG:
- return types[TDOUBLE];
-
- case BVOID:
- return types[TVOID];
- }
-
- diag(Z, "illegal combination of types %Q", b);
- return types[TINT];
-}
-
-int
-stcompat(Node *n, Type *t1, Type *t2, int32 ttab[])
-{
- int i;
- uint32 b;
-
- i = 0;
- if(t2 != T)
- i = t2->etype;
- b = 1L << i;
- i = 0;
- if(t1 != T)
- i = t1->etype;
- if(b & ttab[i]) {
- if(ttab == tasign)
- if(b == BSTRUCT || b == BUNION)
- if(!sametype(t1, t2))
- return 1;
- if(n->op != OCAST)
- if(b == BIND && i == TIND)
- if(!sametype(t1, t2))
- return 1;
- return 0;
- }
- return 1;
-}
-
-int
-tcompat(Node *n, Type *t1, Type *t2, int32 ttab[])
-{
-
- if(stcompat(n, t1, t2, ttab)) {
- if(t1 == T)
- diag(n, "incompatible type: \"%T\" for op \"%O\"",
- t2, n->op);
- else
- diag(n, "incompatible types: \"%T\" and \"%T\" for op \"%O\"",
- t1, t2, n->op);
- return 1;
- }
- return 0;
-}
-
-void
-makedot(Node *n, Type *t, int32 o)
-{
- Node *n1, *n2;
-
- if(t->nbits) {
- n1 = new(OXXX, Z, Z);
- *n1 = *n;
- n->op = OBIT;
- n->left = n1;
- n->right = Z;
- n->type = t;
- n->addable = n1->left->addable;
- n = n1;
- }
- n->addable = n->left->addable;
- if(n->addable == 0) {
- n1 = new1(OCONST, Z, Z);
- n1->vconst = o;
- n1->type = types[TLONG];
- n->right = n1;
- n->type = t;
- return;
- }
- n->left->type = t;
- if(o == 0) {
- *n = *n->left;
- return;
- }
- n->type = t;
- n1 = new1(OCONST, Z, Z);
- n1->vconst = o;
- t = typ(TIND, t);
- t->width = types[TIND]->width;
- n1->type = t;
-
- n2 = new1(OADDR, n->left, Z);
- n2->type = t;
-
- n1 = new1(OADD, n1, n2);
- n1->type = t;
-
- n->op = OIND;
- n->left = n1;
- n->right = Z;
-}
-
-Type*
-dotsearch(Sym *s, Type *t, Node *n, int32 *off)
-{
- Type *t1, *xt, *rt;
-
- xt = T;
-
- /*
- * look it up by name
- */
- for(t1 = t; t1 != T; t1 = t1->down)
- if(t1->sym == s) {
- if(xt != T)
- goto ambig;
- xt = t1;
- }
-
- /*
- * look it up by type
- */
- if(s->class == CTYPEDEF || s->class == CTYPESTR)
- for(t1 = t; t1 != T; t1 = t1->down)
- if(t1->sym == S && typesu[t1->etype])
- if(sametype(s->type, t1)) {
- if(xt != T)
- goto ambig;
- xt = t1;
- }
- if(xt != T) {
- *off = xt->offset;
- return xt;
- }
-
- /*
- * look it up in unnamed substructures
- */
- for(t1 = t; t1 != T; t1 = t1->down)
- if(t1->sym == S && typesu[t1->etype]){
- rt = dotsearch(s, t1->link, n, off);
- if(rt != T) {
- if(xt != T)
- goto ambig;
- xt = rt;
- *off += t1->offset;
- }
- }
- return xt;
-
-ambig:
- diag(n, "ambiguous structure element: %s", s->name);
- return xt;
-}
-
-int32
-dotoffset(Type *st, Type *lt, Node *n)
-{
- Type *t;
- Sym *g;
- int32 o, o1;
-
- o = -1;
- /*
- * first try matching at the top level
- * for matching tag names
- */
- g = st->tag;
- if(g != S)
- for(t=lt->link; t!=T; t=t->down)
- if(t->sym == S)
- if(g == t->tag) {
- if(o >= 0)
- goto ambig;
- o = t->offset;
- }
- if(o >= 0)
- return o;
-
- /*
- * second try matching at the top level
- * for similar types
- */
- for(t=lt->link; t!=T; t=t->down)
- if(t->sym == S)
- if(sametype(st, t)) {
- if(o >= 0)
- goto ambig;
- o = t->offset;
- }
- if(o >= 0)
- return o;
-
- /*
- * last try matching sub-levels
- */
- for(t=lt->link; t!=T; t=t->down)
- if(t->sym == S)
- if(typesu[t->etype]) {
- o1 = dotoffset(st, t, n);
- if(o1 >= 0) {
- if(o >= 0)
- goto ambig;
- o = o1 + t->offset;
- }
- }
- return o;
-
-ambig:
- diag(n, "ambiguous unnamed structure element");
- return o;
-}
-
-/*
- * look into tree for floating point constant expressions
- */
-int
-allfloat(Node *n, int flag)
-{
-
- if(n != Z) {
- if(n->type->etype != TDOUBLE)
- return 1;
- switch(n->op) {
- case OCONST:
- if(flag)
- n->type = types[TFLOAT];
- return 1;
- case OADD: /* no need to get more exotic than this */
- case OSUB:
- case OMUL:
- case ODIV:
- if(!allfloat(n->right, flag))
- break;
- case OCAST:
- if(!allfloat(n->left, flag))
- break;
- if(flag)
- n->type = types[TFLOAT];
- return 1;
- }
- }
- return 0;
-}
-
-void
-constas(Node *n, Type *il, Type *ir)
-{
- Type *l, *r;
-
- l = il;
- r = ir;
-
- if(l == T)
- return;
- if(l->garb & GCONSTNT) {
- warn(n, "assignment to a constant type (%T)", il);
- return;
- }
- if(r == T)
- return;
- for(;;) {
- if(l->etype != TIND || r->etype != TIND)
- break;
- l = l->link;
- r = r->link;
- if(l == T || r == T)
- break;
- if(r->garb & GCONSTNT)
- if(!(l->garb & GCONSTNT)) {
- warn(n, "assignment of a constant pointer type (%T)", ir);
- break;
- }
- }
-}
-
-void
-typeext1(Type *st, Node *l)
-{
- if(st->etype == TFLOAT && allfloat(l, 0))
- allfloat(l, 1);
-}
-
-void
-typeext(Type *st, Node *l)
-{
- Type *lt;
- Node *n1, *n2;
- int32 o;
-
- lt = l->type;
- if(lt == T)
- return;
- if(st->etype == TIND && vconst(l) == 0) {
- l->type = st;
- l->vconst = 0;
- return;
- }
- typeext1(st, l);
-
- /*
- * extension of C
- * if assign of struct containing unnamed sub-struct
- * to type of sub-struct, insert the DOT.
- * if assign of *struct containing unnamed substruct
- * to type of *sub-struct, insert the add-offset
- */
- if(typesu[st->etype] && typesu[lt->etype]) {
- o = dotoffset(st, lt, l);
- if(o >= 0) {
- n1 = new1(OXXX, Z, Z);
- *n1 = *l;
- l->op = ODOT;
- l->left = n1;
- l->right = Z;
- makedot(l, st, o);
- }
- return;
- }
- if(st->etype == TIND && typesu[st->link->etype])
- if(lt->etype == TIND && typesu[lt->link->etype]) {
- o = dotoffset(st->link, lt->link, l);
- if(o >= 0) {
- l->type = st;
- if(o == 0)
- return;
- n1 = new1(OXXX, Z, Z);
- *n1 = *l;
- n2 = new1(OCONST, Z, Z);
- n2->vconst = o;
- n2->type = st;
- l->op = OADD;
- l->left = n1;
- l->right = n2;
- }
- return;
- }
-}
-
-/*
- * a cast that generates no code
- * (same size move)
- */
-int
-nocast(Type *t1, Type *t2)
-{
- int i, b;
-
- if(t1->nbits)
- return 0;
- i = 0;
- if(t2 != T)
- i = t2->etype;
- b = 1<<i;
- i = 0;
- if(t1 != T)
- i = t1->etype;
- if(b & ncast[i])
- return 1;
- return 0;
-}
-
-/*
- * a cast that has a noop semantic
- * (small to large, convert)
- */
-int
-nilcast(Type *t1, Type *t2)
-{
- int et1, et2;
-
- if(t1 == T)
- return 0;
- if(t1->nbits)
- return 0;
- if(t2 == T)
- return 0;
- et1 = t1->etype;
- et2 = t2->etype;
- if(et1 == et2)
- return 1;
- if(typefd[et1] && typefd[et2]) {
- if(ewidth[et1] < ewidth[et2])
- return 1;
- return 0;
- }
- if(typechlp[et1] && typechlp[et2]) {
- if(ewidth[et1] < ewidth[et2])
- return 1;
- return 0;
- }
- return 0;
-}
-
-/*
- * "the usual arithmetic conversions are performed"
- */
-void
-arith(Node *n, int f)
-{
- Type *t1, *t2;
- int i, j, k;
- Node *n1;
- int32 w;
-
- t1 = n->left->type;
- if(n->right == Z)
- t2 = t1;
- else
- t2 = n->right->type;
- i = TXXX;
- if(t1 != T)
- i = t1->etype;
- j = TXXX;
- if(t2 != T)
- j = t2->etype;
- k = tab[i][j];
- if(k == TIND) {
- if(i == TIND)
- n->type = t1;
- else
- if(j == TIND)
- n->type = t2;
- } else {
- /* convert up to at least int */
- if(f == 1)
- while(k < TINT)
- k += 2;
- n->type = types[k];
- }
- if(n->op == OSUB)
- if(i == TIND && j == TIND) {
- w = n->right->type->link->width;
- if(w < 1 || n->left->type->link == T || n->left->type->link->width < 1)
- goto bad;
- n->type = types[ewidth[TIND] <= ewidth[TLONG]? TLONG: TVLONG];
- if(0 && ewidth[TIND] > ewidth[TLONG]){
- n1 = new1(OXXX, Z, Z);
- *n1 = *n;
- n->op = OCAST;
- n->left = n1;
- n->right = Z;
- n->type = types[TLONG];
- }
- if(w > 1) {
- n1 = new1(OXXX, Z, Z);
- *n1 = *n;
- n->op = ODIV;
- n->left = n1;
- n1 = new1(OCONST, Z, Z);
- n1->vconst = w;
- n1->type = n->type;
- n->right = n1;
- w = vlog(n1);
- if(w >= 0) {
- n->op = OASHR;
- n1->vconst = w;
- }
- }
- return;
- }
- if(!sametype(n->type, n->left->type)) {
- n->left = new1(OCAST, n->left, Z);
- n->left->type = n->type;
- if(n->type->etype == TIND) {
- w = n->type->link->width;
- if(w < 1) {
- snap(n->type->link);
- w = n->type->link->width;
- if(w < 1)
- goto bad;
- }
- if(w > 1) {
- n1 = new1(OCONST, Z, Z);
- n1->vconst = w;
- n1->type = n->type;
- n->left = new1(OMUL, n->left, n1);
- n->left->type = n->type;
- }
- }
- }
- if(n->right != Z)
- if(!sametype(n->type, n->right->type)) {
- n->right = new1(OCAST, n->right, Z);
- n->right->type = n->type;
- if(n->type->etype == TIND) {
- w = n->type->link->width;
- if(w < 1) {
- snap(n->type->link);
- w = n->type->link->width;
- if(w < 1)
- goto bad;
- }
- if(w != 1) {
- n1 = new1(OCONST, Z, Z);
- n1->vconst = w;
- n1->type = n->type;
- n->right = new1(OMUL, n->right, n1);
- n->right->type = n->type;
- }
- }
- }
- return;
-bad:
- diag(n, "pointer addition not fully declared: %T", n->type->link);
-}
-
-/*
- * try to rewrite shift & mask
- */
-void
-simplifyshift(Node *n)
-{
- uint32 c3;
- int o, s1, s2, c1, c2;
-
- if(!typechlp[n->type->etype])
- return;
- switch(n->op) {
- default:
- return;
- case OASHL:
- s1 = 0;
- break;
- case OLSHR:
- s1 = 1;
- break;
- case OASHR:
- s1 = 2;
- break;
- }
- if(n->right->op != OCONST)
- return;
- if(n->left->op != OAND)
- return;
- if(n->left->right->op != OCONST)
- return;
- switch(n->left->left->op) {
- default:
- return;
- case OASHL:
- s2 = 0;
- break;
- case OLSHR:
- s2 = 1;
- break;
- case OASHR:
- s2 = 2;
- break;
- }
- if(n->left->left->right->op != OCONST)
- return;
-
- c1 = n->right->vconst;
- c2 = n->left->left->right->vconst;
- c3 = n->left->right->vconst;
-
- o = n->op;
- switch((s1<<3)|s2) {
- case 000: /* (((e <<u c2) & c3) <<u c1) */
- c3 >>= c2;
- c1 += c2;
- if(c1 >= 32)
- break;
- goto rewrite1;
-
- case 002: /* (((e >>s c2) & c3) <<u c1) */
- if(topbit(c3) >= (32-c2))
- break;
- case 001: /* (((e >>u c2) & c3) <<u c1) */
- if(c1 > c2) {
- c3 <<= c2;
- c1 -= c2;
- o = OASHL;
- goto rewrite1;
- }
- c3 <<= c1;
- if(c1 == c2)
- goto rewrite0;
- c1 = c2-c1;
- o = OLSHR;
- goto rewrite2;
-
- case 022: /* (((e >>s c2) & c3) >>s c1) */
- if(c2 <= 0)
- break;
- case 012: /* (((e >>s c2) & c3) >>u c1) */
- if(topbit(c3) >= (32-c2))
- break;
- goto s11;
- case 021: /* (((e >>u c2) & c3) >>s c1) */
- if(topbit(c3) >= 31 && c2 <= 0)
- break;
- goto s11;
- case 011: /* (((e >>u c2) & c3) >>u c1) */
- s11:
- c3 <<= c2;
- c1 += c2;
- if(c1 >= 32)
- break;
- o = OLSHR;
- goto rewrite1;
-
- case 020: /* (((e <<u c2) & c3) >>s c1) */
- if(topbit(c3) >= 31)
- break;
- case 010: /* (((e <<u c2) & c3) >>u c1) */
- c3 >>= c1;
- if(c1 == c2)
- goto rewrite0;
- if(c1 > c2) {
- c1 -= c2;
- goto rewrite2;
- }
- c1 = c2 - c1;
- o = OASHL;
- goto rewrite2;
- }
- return;
-
-rewrite0: /* get rid of both shifts */
-if(debug['<'])prtree(n, "rewrite0");
- *n = *n->left;
- n->left = n->left->left;
- n->right->vconst = c3;
- return;
-rewrite1: /* get rid of lower shift */
-if(debug['<'])prtree(n, "rewrite1");
- n->left->left = n->left->left->left;
- n->left->right->vconst = c3;
- n->right->vconst = c1;
- n->op = o;
- return;
-rewrite2: /* get rid of upper shift */
-if(debug['<'])prtree(n, "rewrite2");
- *n = *n->left;
- n->right->vconst = c3;
- n->left->right->vconst = c1;
- n->left->op = o;
-}
-
-int
-side(Node *n)
-{
-
-loop:
- if(n != Z)
- switch(n->op) {
- case OCAST:
- case ONOT:
- case OADDR:
- case OIND:
- n = n->left;
- goto loop;
-
- case OCOND:
- if(side(n->left))
- break;
- n = n->right;
-
- case OEQ:
- case ONE:
- case OLT:
- case OGE:
- case OGT:
- case OLE:
- case OADD:
- case OSUB:
- case OMUL:
- case OLMUL:
- case ODIV:
- case OLDIV:
- case OLSHR:
- case OASHL:
- case OASHR:
- case OAND:
- case OOR:
- case OXOR:
- case OMOD:
- case OLMOD:
- case OANDAND:
- case OOROR:
- case OCOMMA:
- case ODOT:
- if(side(n->left))
- break;
- n = n->right;
- goto loop;
-
- case OSIGN:
- case OSIZE:
- case OCONST:
- case OSTRING:
- case OLSTRING:
- case ONAME:
- return 0;
- }
- return 1;
-}
-
-int
-vconst(Node *n)
-{
- int i;
-
- if(n == Z)
- goto no;
- if(n->op != OCONST)
- goto no;
- if(n->type == T)
- goto no;
- switch(n->type->etype)
- {
- case TFLOAT:
- case TDOUBLE:
- i = 100;
- if(n->fconst > i || n->fconst < -i)
- goto no;
- i = n->fconst;
- if(i != n->fconst)
- goto no;
- return i;
-
- case TVLONG:
- case TUVLONG:
- i = n->vconst;
- if(i != n->vconst)
- goto no;
- return i;
-
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- case TINT:
- case TUINT:
- case TLONG:
- case TULONG:
- case TIND:
- i = n->vconst;
- if(i != n->vconst)
- goto no;
- return i;
- }
-no:
- return -159; /* first uninteresting constant */
-}
-
-/*
- * return log(n) if n is a power of 2 constant
- */
-int
-xlog2(uvlong v)
-{
- int s, i;
- uvlong m;
-
- s = 0;
- m = MASK(8*sizeof(uvlong));
- for(i=32; i; i>>=1) {
- m >>= i;
- if(!(v & m)) {
- v >>= i;
- s += i;
- }
- }
- if(v == 1)
- return s;
- return -1;
-}
-
-int
-vlog(Node *n)
-{
- if(n->op != OCONST)
- goto bad;
- if(typefd[n->type->etype])
- goto bad;
-
- return xlog2(n->vconst);
-
-bad:
- return -1;
-}
-
-int
-topbit(uint32 v)
-{
- int i;
-
- for(i = -1; v; i++)
- v >>= 1;
- return i;
-}
-
-/*
- * try to cast a constant down
- * rather than cast a variable up
- * example:
- * if(c == 'a')
- */
-void
-relcon(Node *l, Node *r)
-{
- vlong v;
-
- if(l->op != OCONST)
- return;
- if(r->op != OCAST)
- return;
- if(!nilcast(r->left->type, r->type))
- return;
- switch(r->type->etype) {
- default:
- return;
- case TCHAR:
- case TUCHAR:
- case TSHORT:
- case TUSHORT:
- v = convvtox(l->vconst, r->type->etype);
- if(v != l->vconst)
- return;
- break;
- }
- l->type = r->left->type;
- *r = *r->left;
-}
-
-int
-relindex(int o)
-{
-
- switch(o) {
- default:
- diag(Z, "bad in relindex: %O", o);
- case OEQ: return 0;
- case ONE: return 1;
- case OLE: return 2;
- case OLS: return 3;
- case OLT: return 4;
- case OLO: return 5;
- case OGE: return 6;
- case OHS: return 7;
- case OGT: return 8;
- case OHI: return 9;
- }
-}
-
-Node*
-invert(Node *n)
-{
- Node *i;
-
- if(n == Z || n->op != OLIST)
- return n;
- i = n;
- for(n = n->left; n != Z; n = n->left) {
- if(n->op != OLIST)
- break;
- i->left = n->right;
- n->right = i;
- i = n;
- }
- i->left = n;
- return i;
-}
-
-int
-bitno(int32 b)
-{
- int i;
-
- for(i=0; i<32; i++)
- if(b & (1L<<i))
- return i;
- diag(Z, "bad in bitno");
- return 0;
-}
-
-int32
-typebitor(int32 a, int32 b)
-{
- int32 c;
-
- c = a | b;
- if(a & b)
- if((a & b) == BLONG)
- c |= BVLONG; /* long long => vlong */
- else
- warn(Z, "once is enough: %Q", a & b);
- return c;
-}
-
-void
-diag(Node *n, char *fmt, ...)
-{
- char buf[STRINGSZ];
- va_list arg;
-
- va_start(arg, fmt);
- vseprint(buf, buf+sizeof(buf), fmt, arg);
- va_end(arg);
- Bprint(&diagbuf, "%L %s\n", (n==Z)? nearln: n->lineno, buf);
-
- if(debug['X']){
- Bflush(&diagbuf);
- abort();
- }
- if(n != Z)
- if(debug['v'])
- prtree(n, "diagnostic");
-
- nerrors++;
- if(nerrors > 10) {
- Bprint(&diagbuf, "too many errors\n");
- errorexit();
- }
-}
-
-void
-warn(Node *n, char *fmt, ...)
-{
- char buf[STRINGSZ];
- va_list arg;
-
- if(debug['w']) {
- Bprint(&diagbuf, "warning: ");
- va_start(arg, fmt);
- vseprint(buf, buf+sizeof(buf), fmt, arg);
- va_end(arg);
- Bprint(&diagbuf, "%L %s\n", (n==Z)? nearln: n->lineno, buf);
-
- if(n != Z)
- if(debug['v'])
- prtree(n, "warning");
- }
-}
-
-void
-yyerror(char *fmt, ...)
-{
- char buf[STRINGSZ];
- va_list arg;
-
- /*
- * hack to intercept message from yaccpar
- */
- if(strcmp(fmt, "syntax error") == 0) {
- yyerror("syntax error, last name: %s", symb);
- return;
- }
- va_start(arg, fmt);
- vseprint(buf, buf+sizeof(buf), fmt, arg);
- va_end(arg);
- Bprint(&diagbuf, "%L %s\n", lineno, buf);
- nerrors++;
- if(nerrors > 10) {
- Bprint(&diagbuf, "too many errors\n");
- errorexit();
- }
-}
-
-void
-fatal(Node *n, char *fmt, ...)
-{
- char buf[STRINGSZ];
- va_list arg;
-
- va_start(arg, fmt);
- vseprint(buf, buf+sizeof(buf), fmt, arg);
- va_end(arg);
- Bprint(&diagbuf, "%L %s\n", (n==Z)? nearln: n->lineno, buf);
-
- if(debug['X']){
- Bflush(&diagbuf);
- abort();
- }
- if(n != Z)
- if(debug['v'])
- prtree(n, "diagnostic");
-
- nerrors++;
- errorexit();
-}
-
-uint32 thash1 = 0x2edab8c9;
-uint32 thash2 = 0x1dc74fb8;
-uint32 thash3 = 0x1f241331;
-uint32 thash[NALLTYPES];
-Init thashinit[] =
-{
- TXXX, 0x17527bbd, 0,
- TCHAR, 0x5cedd32b, 0,
- TUCHAR, 0x552c4454, 0,
- TSHORT, 0x63040b4b, 0,
- TUSHORT, 0x32a45878, 0,
- TINT, 0x4151d5bd, 0,
- TUINT, 0x5ae707d6, 0,
- TLONG, 0x5ef20f47, 0,
- TULONG, 0x36d8eb8f, 0,
- TVLONG, 0x6e5e9590, 0,
- TUVLONG, 0x75910105, 0,
- TFLOAT, 0x25fd7af1, 0,
- TDOUBLE, 0x7c40a1b2, 0,
- TIND, 0x1b832357, 0,
- TFUNC, 0x6babc9cb, 0,
- TARRAY, 0x7c50986d, 0,
- TVOID, 0x44112eff, 0,
- TSTRUCT, 0x7c2da3bf, 0,
- TUNION, 0x3eb25e98, 0,
- TENUM, 0x44b54f61, 0,
- TFILE, 0x19242ac3, 0,
- TOLD, 0x22b15988, 0,
- TDOT, 0x0204f6b3, 0,
- -1, 0, 0,
-};
-
-char* bnames[NALIGN];
-Init bnamesinit[] =
-{
- Axxx, 0, "Axxx",
- Ael1, 0, "el1",
- Ael2, 0, "el2",
- Asu2, 0, "su2",
- Aarg0, 0, "arg0",
- Aarg1, 0, "arg1",
- Aarg2, 0, "arg2",
- Aaut3, 0, "aut3",
- -1, 0, 0,
-};
-
-char* tnames[NALLTYPES];
-Init tnamesinit[] =
-{
- TXXX, 0, "TXXX",
- TCHAR, 0, "CHAR",
- TUCHAR, 0, "UCHAR",
- TSHORT, 0, "SHORT",
- TUSHORT, 0, "USHORT",
- TINT, 0, "INT",
- TUINT, 0, "UINT",
- TLONG, 0, "LONG",
- TULONG, 0, "ULONG",
- TVLONG, 0, "VLONG",
- TUVLONG, 0, "UVLONG",
- TFLOAT, 0, "FLOAT",
- TDOUBLE, 0, "DOUBLE",
- TIND, 0, "IND",
- TFUNC, 0, "FUNC",
- TARRAY, 0, "ARRAY",
- TVOID, 0, "VOID",
- TSTRUCT, 0, "STRUCT",
- TUNION, 0, "UNION",
- TENUM, 0, "ENUM",
- TFILE, 0, "FILE",
- TOLD, 0, "OLD",
- TDOT, 0, "DOT",
- -1, 0, 0,
-};
-
-char* gnames[NGTYPES];
-Init gnamesinit[] =
-{
- GXXX, 0, "GXXX",
- GCONSTNT, 0, "CONST",
- GVOLATILE, 0, "VOLATILE",
- GVOLATILE|GCONSTNT, 0, "CONST-VOLATILE",
- -1, 0, 0,
-};
-
-char* qnames[NALLTYPES];
-Init qnamesinit[] =
-{
- TXXX, 0, "TXXX",
- TCHAR, 0, "CHAR",
- TUCHAR, 0, "UCHAR",
- TSHORT, 0, "SHORT",
- TUSHORT, 0, "USHORT",
- TINT, 0, "INT",
- TUINT, 0, "UINT",
- TLONG, 0, "LONG",
- TULONG, 0, "ULONG",
- TVLONG, 0, "VLONG",
- TUVLONG, 0, "UVLONG",
- TFLOAT, 0, "FLOAT",
- TDOUBLE, 0, "DOUBLE",
- TIND, 0, "IND",
- TFUNC, 0, "FUNC",
- TARRAY, 0, "ARRAY",
- TVOID, 0, "VOID",
- TSTRUCT, 0, "STRUCT",
- TUNION, 0, "UNION",
- TENUM, 0, "ENUM",
-
- TAUTO, 0, "AUTO",
- TEXTERN, 0, "EXTERN",
- TSTATIC, 0, "STATIC",
- TTYPEDEF, 0, "TYPEDEF",
- TTYPESTR, 0, "TYPESTR",
- TREGISTER, 0, "REGISTER",
- TCONSTNT, 0, "CONSTNT",
- TVOLATILE, 0, "VOLATILE",
- TUNSIGNED, 0, "UNSIGNED",
- TSIGNED, 0, "SIGNED",
- TDOT, 0, "DOT",
- TFILE, 0, "FILE",
- TOLD, 0, "OLD",
- -1, 0, 0,
-};
-char* cnames[NCTYPES];
-Init cnamesinit[] =
-{
- CXXX, 0, "CXXX",
- CAUTO, 0, "AUTO",
- CEXTERN, 0, "EXTERN",
- CGLOBL, 0, "GLOBL",
- CSTATIC, 0, "STATIC",
- CLOCAL, 0, "LOCAL",
- CTYPEDEF, 0, "TYPEDEF",
- CTYPESTR, 0, "TYPESTR",
- CPARAM, 0, "PARAM",
- CSELEM, 0, "SELEM",
- CLABEL, 0, "LABEL",
- CEXREG, 0, "EXREG",
- -1, 0, 0,
-};
-
-char* onames[OEND+1];
-Init onamesinit[] =
-{
- OXXX, 0, "OXXX",
- OADD, 0, "ADD",
- OADDR, 0, "ADDR",
- OAND, 0, "AND",
- OANDAND, 0, "ANDAND",
- OARRAY, 0, "ARRAY",
- OAS, 0, "AS",
- OASI, 0, "ASI",
- OASADD, 0, "ASADD",
- OASAND, 0, "ASAND",
- OASASHL, 0, "ASASHL",
- OASASHR, 0, "ASASHR",
- OASDIV, 0, "ASDIV",
- OASHL, 0, "ASHL",
- OASHR, 0, "ASHR",
- OASLDIV, 0, "ASLDIV",
- OASLMOD, 0, "ASLMOD",
- OASLMUL, 0, "ASLMUL",
- OASLSHR, 0, "ASLSHR",
- OASMOD, 0, "ASMOD",
- OASMUL, 0, "ASMUL",
- OASOR, 0, "ASOR",
- OASSUB, 0, "ASSUB",
- OASXOR, 0, "ASXOR",
- OBIT, 0, "BIT",
- OBREAK, 0, "BREAK",
- OCASE, 0, "CASE",
- OCAST, 0, "CAST",
- OCOMMA, 0, "COMMA",
- OCOND, 0, "COND",
- OCONST, 0, "CONST",
- OCONTINUE, 0, "CONTINUE",
- ODIV, 0, "DIV",
- ODOT, 0, "DOT",
- ODOTDOT, 0, "DOTDOT",
- ODWHILE, 0, "DWHILE",
- OENUM, 0, "ENUM",
- OEQ, 0, "EQ",
- OEXREG, 0, "EXREG",
- OFOR, 0, "FOR",
- OFUNC, 0, "FUNC",
- OGE, 0, "GE",
- OGOTO, 0, "GOTO",
- OGT, 0, "GT",
- OHI, 0, "HI",
- OHS, 0, "HS",
- OIF, 0, "IF",
- OIND, 0, "IND",
- OINDREG, 0, "INDREG",
- OINIT, 0, "INIT",
- OLABEL, 0, "LABEL",
- OLDIV, 0, "LDIV",
- OLE, 0, "LE",
- OLIST, 0, "LIST",
- OLMOD, 0, "LMOD",
- OLMUL, 0, "LMUL",
- OLO, 0, "LO",
- OLS, 0, "LS",
- OLSHR, 0, "LSHR",
- OLT, 0, "LT",
- OMOD, 0, "MOD",
- OMUL, 0, "MUL",
- ONAME, 0, "NAME",
- ONE, 0, "NE",
- ONOT, 0, "NOT",
- OOR, 0, "OR",
- OOROR, 0, "OROR",
- OPOSTDEC, 0, "POSTDEC",
- OPOSTINC, 0, "POSTINC",
- OPREDEC, 0, "PREDEC",
- OPREINC, 0, "PREINC",
- OPREFETCH, 0, "PREFETCH",
- OPROTO, 0, "PROTO",
- OREGISTER, 0, "REGISTER",
- ORETURN, 0, "RETURN",
- OSET, 0, "SET",
- OSIGN, 0, "SIGN",
- OSIZE, 0, "SIZE",
- OSTRING, 0, "STRING",
- OLSTRING, 0, "LSTRING",
- OSTRUCT, 0, "STRUCT",
- OSUB, 0, "SUB",
- OSWITCH, 0, "SWITCH",
- OUNION, 0, "UNION",
- OUSED, 0, "USED",
- OWHILE, 0, "WHILE",
- OXOR, 0, "XOR",
- OPOS, 0, "POS",
- ONEG, 0, "NEG",
- OCOM, 0, "COM",
- OELEM, 0, "ELEM",
- OTST, 0, "TST",
- OINDEX, 0, "INDEX",
- OFAS, 0, "FAS",
- OREGPAIR, 0, "REGPAIR",
- OROTL, 0, "ROTL",
- OEND, 0, "END",
- -1, 0, 0,
-};
-
-/* OEQ, ONE, OLE, OLS, OLT, OLO, OGE, OHS, OGT, OHI */
-uchar comrel[12] =
-{
- ONE, OEQ, OGT, OHI, OGE, OHS, OLT, OLO, OLE, OLS,
-};
-uchar invrel[12] =
-{
- OEQ, ONE, OGE, OHS, OGT, OHI, OLE, OLS, OLT, OLO,
-};
-uchar logrel[12] =
-{
- OEQ, ONE, OLS, OLS, OLO, OLO, OHS, OHS, OHI, OHI,
-};
-
-uchar typei[NALLTYPES];
-int typeiinit[] =
-{
- TCHAR, TUCHAR, TSHORT, TUSHORT, TINT, TUINT, TLONG, TULONG, TVLONG, TUVLONG, -1,
-};
-uchar typeu[NALLTYPES];
-int typeuinit[] =
-{
- TUCHAR, TUSHORT, TUINT, TULONG, TUVLONG, TIND, -1,
-};
-
-uchar typesuv[NALLTYPES];
-int typesuvinit[] =
-{
- TVLONG, TUVLONG, TSTRUCT, TUNION, -1,
-};
-
-uchar typeilp[NALLTYPES];
-int typeilpinit[] =
-{
- TINT, TUINT, TLONG, TULONG, TIND, -1
-};
-
-uchar typechl[NALLTYPES];
-uchar typechlv[NALLTYPES];
-uchar typechlvp[NALLTYPES];
-int typechlinit[] =
-{
- TCHAR, TUCHAR, TSHORT, TUSHORT, TINT, TUINT, TLONG, TULONG, -1,
-};
-
-uchar typechlp[NALLTYPES];
-int typechlpinit[] =
-{
- TCHAR, TUCHAR, TSHORT, TUSHORT, TINT, TUINT, TLONG, TULONG, TIND, -1,
-};
-
-uchar typechlpfd[NALLTYPES];
-int typechlpfdinit[] =
-{
- TCHAR, TUCHAR, TSHORT, TUSHORT, TINT, TUINT, TLONG, TULONG, TFLOAT, TDOUBLE, TIND, -1,
-};
-
-uchar typec[NALLTYPES];
-int typecinit[] =
-{
- TCHAR, TUCHAR, -1
-};
-
-uchar typeh[NALLTYPES];
-int typehinit[] =
-{
- TSHORT, TUSHORT, -1,
-};
-
-uchar typeil[NALLTYPES];
-int typeilinit[] =
-{
- TINT, TUINT, TLONG, TULONG, -1,
-};
-
-uchar typev[NALLTYPES];
-int typevinit[] =
-{
- TVLONG, TUVLONG, -1,
-};
-
-uchar typefd[NALLTYPES];
-int typefdinit[] =
-{
- TFLOAT, TDOUBLE, -1,
-};
-
-uchar typeaf[NALLTYPES];
-int typeafinit[] =
-{
- TFUNC, TARRAY, -1,
-};
-
-uchar typesu[NALLTYPES];
-int typesuinit[] =
-{
- TSTRUCT, TUNION, -1,
-};
-
-int32 tasign[NALLTYPES];
-Init tasigninit[] =
-{
- TCHAR, BNUMBER, 0,
- TUCHAR, BNUMBER, 0,
- TSHORT, BNUMBER, 0,
- TUSHORT, BNUMBER, 0,
- TINT, BNUMBER, 0,
- TUINT, BNUMBER, 0,
- TLONG, BNUMBER, 0,
- TULONG, BNUMBER, 0,
- TVLONG, BNUMBER, 0,
- TUVLONG, BNUMBER, 0,
- TFLOAT, BNUMBER, 0,
- TDOUBLE, BNUMBER, 0,
- TIND, BIND, 0,
- TSTRUCT, BSTRUCT, 0,
- TUNION, BUNION, 0,
- -1, 0, 0,
-};
-
-int32 tasadd[NALLTYPES];
-Init tasaddinit[] =
-{
- TCHAR, BNUMBER, 0,
- TUCHAR, BNUMBER, 0,
- TSHORT, BNUMBER, 0,
- TUSHORT, BNUMBER, 0,
- TINT, BNUMBER, 0,
- TUINT, BNUMBER, 0,
- TLONG, BNUMBER, 0,
- TULONG, BNUMBER, 0,
- TVLONG, BNUMBER, 0,
- TUVLONG, BNUMBER, 0,
- TFLOAT, BNUMBER, 0,
- TDOUBLE, BNUMBER, 0,
- TIND, BINTEGER, 0,
- -1, 0, 0,
-};
-
-int32 tcast[NALLTYPES];
-Init tcastinit[] =
-{
- TCHAR, BNUMBER|BIND|BVOID, 0,
- TUCHAR, BNUMBER|BIND|BVOID, 0,
- TSHORT, BNUMBER|BIND|BVOID, 0,
- TUSHORT, BNUMBER|BIND|BVOID, 0,
- TINT, BNUMBER|BIND|BVOID, 0,
- TUINT, BNUMBER|BIND|BVOID, 0,
- TLONG, BNUMBER|BIND|BVOID, 0,
- TULONG, BNUMBER|BIND|BVOID, 0,
- TVLONG, BNUMBER|BIND|BVOID, 0,
- TUVLONG, BNUMBER|BIND|BVOID, 0,
- TFLOAT, BNUMBER|BVOID, 0,
- TDOUBLE, BNUMBER|BVOID, 0,
- TIND, BINTEGER|BIND|BVOID, 0,
- TVOID, BVOID, 0,
- TSTRUCT, BSTRUCT|BVOID, 0,
- TUNION, BUNION|BVOID, 0,
- -1, 0, 0,
-};
-
-int32 tadd[NALLTYPES];
-Init taddinit[] =
-{
- TCHAR, BNUMBER|BIND, 0,
- TUCHAR, BNUMBER|BIND, 0,
- TSHORT, BNUMBER|BIND, 0,
- TUSHORT, BNUMBER|BIND, 0,
- TINT, BNUMBER|BIND, 0,
- TUINT, BNUMBER|BIND, 0,
- TLONG, BNUMBER|BIND, 0,
- TULONG, BNUMBER|BIND, 0,
- TVLONG, BNUMBER|BIND, 0,
- TUVLONG, BNUMBER|BIND, 0,
- TFLOAT, BNUMBER, 0,
- TDOUBLE, BNUMBER, 0,
- TIND, BINTEGER, 0,
- -1, 0, 0,
-};
-
-int32 tsub[NALLTYPES];
-Init tsubinit[] =
-{
- TCHAR, BNUMBER, 0,
- TUCHAR, BNUMBER, 0,
- TSHORT, BNUMBER, 0,
- TUSHORT, BNUMBER, 0,
- TINT, BNUMBER, 0,
- TUINT, BNUMBER, 0,
- TLONG, BNUMBER, 0,
- TULONG, BNUMBER, 0,
- TVLONG, BNUMBER, 0,
- TUVLONG, BNUMBER, 0,
- TFLOAT, BNUMBER, 0,
- TDOUBLE, BNUMBER, 0,
- TIND, BINTEGER|BIND, 0,
- -1, 0, 0,
-};
-
-int32 tmul[NALLTYPES];
-Init tmulinit[] =
-{
- TCHAR, BNUMBER, 0,
- TUCHAR, BNUMBER, 0,
- TSHORT, BNUMBER, 0,
- TUSHORT, BNUMBER, 0,
- TINT, BNUMBER, 0,
- TUINT, BNUMBER, 0,
- TLONG, BNUMBER, 0,
- TULONG, BNUMBER, 0,
- TVLONG, BNUMBER, 0,
- TUVLONG, BNUMBER, 0,
- TFLOAT, BNUMBER, 0,
- TDOUBLE, BNUMBER, 0,
- -1, 0, 0,
-};
-
-int32 tand[NALLTYPES];
-Init tandinit[] =
-{
- TCHAR, BINTEGER, 0,
- TUCHAR, BINTEGER, 0,
- TSHORT, BINTEGER, 0,
- TUSHORT, BINTEGER, 0,
- TINT, BNUMBER, 0,
- TUINT, BNUMBER, 0,
- TLONG, BINTEGER, 0,
- TULONG, BINTEGER, 0,
- TVLONG, BINTEGER, 0,
- TUVLONG, BINTEGER, 0,
- -1, 0, 0,
-};
-
-int32 trel[NALLTYPES];
-Init trelinit[] =
-{
- TCHAR, BNUMBER, 0,
- TUCHAR, BNUMBER, 0,
- TSHORT, BNUMBER, 0,
- TUSHORT, BNUMBER, 0,
- TINT, BNUMBER, 0,
- TUINT, BNUMBER, 0,
- TLONG, BNUMBER, 0,
- TULONG, BNUMBER, 0,
- TVLONG, BNUMBER, 0,
- TUVLONG, BNUMBER, 0,
- TFLOAT, BNUMBER, 0,
- TDOUBLE, BNUMBER, 0,
- TIND, BIND, 0,
- -1, 0, 0,
-};
-
-int32 tfunct[1] =
-{
- BFUNC,
-};
-
-int32 tindir[1] =
-{
- BIND,
-};
-
-int32 tdot[1] =
-{
- BSTRUCT|BUNION,
-};
-
-int32 tnot[1] =
-{
- BNUMBER|BIND,
-};
-
-int32 targ[1] =
-{
- BNUMBER|BIND|BSTRUCT|BUNION,
-};
-
-uchar tab[NTYPE][NTYPE] =
-{
-/*TXXX*/ { 0,
- },
-
-/*TCHAR*/ { 0, TCHAR, TUCHAR, TSHORT, TUSHORT, TINT, TUINT, TLONG,
- TULONG, TVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TUCHAR*/ { 0, TUCHAR, TUCHAR, TUSHORT, TUSHORT, TUINT, TUINT, TULONG,
- TULONG, TUVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TSHORT*/ { 0, TSHORT, TUSHORT, TSHORT, TUSHORT, TINT, TUINT, TLONG,
- TULONG, TVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TUSHORT*/ { 0, TUSHORT, TUSHORT, TUSHORT, TUSHORT, TUINT, TUINT, TULONG,
- TULONG, TUVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TINT*/ { 0, TINT, TUINT, TINT, TUINT, TINT, TUINT, TLONG,
- TULONG, TVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TUINT*/ { 0, TUINT, TUINT, TUINT, TUINT, TUINT, TUINT, TULONG,
- TULONG, TUVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TLONG*/ { 0, TLONG, TULONG, TLONG, TULONG, TLONG, TULONG, TLONG,
- TULONG, TVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TULONG*/ { 0, TULONG, TULONG, TULONG, TULONG, TULONG, TULONG, TULONG,
- TULONG, TUVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TVLONG*/ { 0, TVLONG, TUVLONG, TVLONG, TUVLONG, TVLONG, TUVLONG, TVLONG,
- TUVLONG, TVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TUVLONG*/ { 0, TUVLONG, TUVLONG, TUVLONG, TUVLONG, TUVLONG, TUVLONG, TUVLONG,
- TUVLONG, TUVLONG, TUVLONG, TFLOAT, TDOUBLE, TIND,
- },
-/*TFLOAT*/ { 0, TFLOAT, TFLOAT, TFLOAT, TFLOAT, TFLOAT, TFLOAT, TFLOAT,
- TFLOAT, TFLOAT, TFLOAT, TFLOAT, TDOUBLE, TIND,
- },
-/*TDOUBLE*/ { 0, TDOUBLE, TDOUBLE, TDOUBLE, TDOUBLE, TDOUBLE, TDOUBLE, TDOUBLE,
- TDOUBLE, TDOUBLE, TDOUBLE, TFLOAT, TDOUBLE, TIND,
- },
-/*TIND*/ { 0, TIND, TIND, TIND, TIND, TIND, TIND, TIND,
- TIND, TIND, TIND, TIND, TIND, TIND,
- },
-};
-
-void
-urk(char *name, int max, int i)
-{
- if(i >= max) {
- fprint(2, "bad tinit: %s %d>=%d\n", name, i, max);
- exits("init");
- }
-}
-
-void
-tinit(void)
-{
- int *ip;
- Init *p;
-
- for(p=thashinit; p->code >= 0; p++) {
- urk("thash", nelem(thash), p->code);
- thash[p->code] = p->value;
- }
- for(p=bnamesinit; p->code >= 0; p++) {
- urk("bnames", nelem(bnames), p->code);
- bnames[p->code] = p->s;
- }
- for(p=tnamesinit; p->code >= 0; p++) {
- urk("tnames", nelem(tnames), p->code);
- tnames[p->code] = p->s;
- }
- for(p=gnamesinit; p->code >= 0; p++) {
- urk("gnames", nelem(gnames), p->code);
- gnames[p->code] = p->s;
- }
- for(p=qnamesinit; p->code >= 0; p++) {
- urk("qnames", nelem(qnames), p->code);
- qnames[p->code] = p->s;
- }
- for(p=cnamesinit; p->code >= 0; p++) {
- urk("cnames", nelem(cnames), p->code);
- cnames[p->code] = p->s;
- }
- for(p=onamesinit; p->code >= 0; p++) {
- urk("onames", nelem(onames), p->code);
- onames[p->code] = p->s;
- }
- for(ip=typeiinit; *ip>=0; ip++) {
- urk("typei", nelem(typei), *ip);
- typei[*ip] = 1;
- }
- for(ip=typeuinit; *ip>=0; ip++) {
- urk("typeu", nelem(typeu), *ip);
- typeu[*ip] = 1;
- }
- for(ip=typesuvinit; *ip>=0; ip++) {
- urk("typesuv", nelem(typesuv), *ip);
- typesuv[*ip] = 1;
- }
- for(ip=typeilpinit; *ip>=0; ip++) {
- urk("typeilp", nelem(typeilp), *ip);
- typeilp[*ip] = 1;
- }
- for(ip=typechlinit; *ip>=0; ip++) {
- urk("typechl", nelem(typechl), *ip);
- typechl[*ip] = 1;
- typechlv[*ip] = 1;
- typechlvp[*ip] = 1;
- }
- for(ip=typechlpinit; *ip>=0; ip++) {
- urk("typechlp", nelem(typechlp), *ip);
- typechlp[*ip] = 1;
- typechlvp[*ip] = 1;
- }
- for(ip=typechlpfdinit; *ip>=0; ip++) {
- urk("typechlpfd", nelem(typechlpfd), *ip);
- typechlpfd[*ip] = 1;
- }
- for(ip=typecinit; *ip>=0; ip++) {
- urk("typec", nelem(typec), *ip);
- typec[*ip] = 1;
- }
- for(ip=typehinit; *ip>=0; ip++) {
- urk("typeh", nelem(typeh), *ip);
- typeh[*ip] = 1;
- }
- for(ip=typeilinit; *ip>=0; ip++) {
- urk("typeil", nelem(typeil), *ip);
- typeil[*ip] = 1;
- }
- for(ip=typevinit; *ip>=0; ip++) {
- urk("typev", nelem(typev), *ip);
- typev[*ip] = 1;
- typechlv[*ip] = 1;
- typechlvp[*ip] = 1;
- }
- for(ip=typefdinit; *ip>=0; ip++) {
- urk("typefd", nelem(typefd), *ip);
- typefd[*ip] = 1;
- }
- for(ip=typeafinit; *ip>=0; ip++) {
- urk("typeaf", nelem(typeaf), *ip);
- typeaf[*ip] = 1;
- }
- for(ip=typesuinit; *ip >= 0; ip++) {
- urk("typesu", nelem(typesu), *ip);
- typesu[*ip] = 1;
- }
- for(p=tasigninit; p->code >= 0; p++) {
- urk("tasign", nelem(tasign), p->code);
- tasign[p->code] = p->value;
- }
- for(p=tasaddinit; p->code >= 0; p++) {
- urk("tasadd", nelem(tasadd), p->code);
- tasadd[p->code] = p->value;
- }
- for(p=tcastinit; p->code >= 0; p++) {
- urk("tcast", nelem(tcast), p->code);
- tcast[p->code] = p->value;
- }
- for(p=taddinit; p->code >= 0; p++) {
- urk("tadd", nelem(tadd), p->code);
- tadd[p->code] = p->value;
- }
- for(p=tsubinit; p->code >= 0; p++) {
- urk("tsub", nelem(tsub), p->code);
- tsub[p->code] = p->value;
- }
- for(p=tmulinit; p->code >= 0; p++) {
- urk("tmul", nelem(tmul), p->code);
- tmul[p->code] = p->value;
- }
- for(p=tandinit; p->code >= 0; p++) {
- urk("tand", nelem(tand), p->code);
- tand[p->code] = p->value;
- }
- for(p=trelinit; p->code >= 0; p++) {
- urk("trel", nelem(trel), p->code);
- trel[p->code] = p->value;
- }
-
- /* 32-bit defaults */
- typeword = typechlp;
- typecmplx = typesuv;
-}
-
-/*
- * return 1 if it is impossible to jump into the middle of n.
- */
-static int
-deadhead(Node *n, int caseok)
-{
-loop:
- if(n == Z)
- return 1;
- switch(n->op) {
- case OLIST:
- if(!deadhead(n->left, caseok))
- return 0;
- rloop:
- n = n->right;
- goto loop;
-
- case ORETURN:
- break;
-
- case OLABEL:
- return 0;
-
- case OGOTO:
- break;
-
- case OCASE:
- if(!caseok)
- return 0;
- goto rloop;
-
- case OSWITCH:
- return deadhead(n->right, 1);
-
- case OWHILE:
- case ODWHILE:
- goto rloop;
-
- case OFOR:
- goto rloop;
-
- case OCONTINUE:
- break;
-
- case OBREAK:
- break;
-
- case OIF:
- return deadhead(n->right->left, caseok) && deadhead(n->right->right, caseok);
-
- case OSET:
- case OUSED:
- break;
- }
- return 1;
-}
-
-int
-deadheads(Node *c)
-{
- return deadhead(c->left, 0) && deadhead(c->right, 0);
-}
-
-int
-mixedasop(Type *l, Type *r)
-{
- return !typefd[l->etype] && typefd[r->etype];
-}
-
-LSym*
-linksym(Sym *s)
-{
- if(s == nil)
- return nil;
- if(s->lsym != nil)
- return s->lsym;
- return linklookup(ctxt, s->name, s->class == CSTATIC);
-}
diff --git a/src/cmd/cc/y.tab.c b/src/cmd/cc/y.tab.c
deleted file mode 100644
index 94932efe5..000000000
--- a/src/cmd/cc/y.tab.c
+++ /dev/null
@@ -1,3822 +0,0 @@
-/* A Bison parser, made by GNU Bison 2.3. */
-
-/* Skeleton implementation for Bison's Yacc-like parsers in C
-
- Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
- Free Software Foundation, Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA. */
-
-/* As a special exception, you may create a larger work that contains
- part or all of the Bison parser skeleton and distribute that work
- under terms of your choice, so long as that work isn't itself a
- parser generator using the skeleton or a modified version thereof
- as a parser skeleton. Alternatively, if you modify or redistribute
- the parser skeleton itself, you may (at your option) remove this
- special exception, which will cause the skeleton and the resulting
- Bison output files to be licensed under the GNU General Public
- License without this special exception.
-
- This special exception was added by the Free Software Foundation in
- version 2.2 of Bison. */
-
-/* C LALR(1) parser skeleton written by Richard Stallman, by
- simplifying the original so-called "semantic" parser. */
-
-/* All symbols defined below should begin with yy or YY, to avoid
- infringing on user name space. This should be done even for local
- variables, as they might otherwise be expanded by user macros.
- There are some unavoidable exceptions within include files to
- define necessary library symbols; they are noted "INFRINGES ON
- USER NAME SPACE" below. */
-
-/* Identify Bison output. */
-#define YYBISON 1
-
-/* Bison version. */
-#define YYBISON_VERSION "2.3"
-
-/* Skeleton name. */
-#define YYSKELETON_NAME "yacc.c"
-
-/* Pure parsers. */
-#define YYPURE 0
-
-/* Using locations. */
-#define YYLSP_NEEDED 0
-
-
-
-/* Tokens. */
-#ifndef YYTOKENTYPE
-# define YYTOKENTYPE
- /* Put the tokens into the symbol table, so that GDB and other debuggers
- know about them. */
- enum yytokentype {
- LORE = 258,
- LXORE = 259,
- LANDE = 260,
- LLSHE = 261,
- LRSHE = 262,
- LMDE = 263,
- LDVE = 264,
- LMLE = 265,
- LME = 266,
- LPE = 267,
- LOROR = 268,
- LANDAND = 269,
- LNE = 270,
- LEQ = 271,
- LGE = 272,
- LLE = 273,
- LRSH = 274,
- LLSH = 275,
- LMG = 276,
- LPP = 277,
- LMM = 278,
- LNAME = 279,
- LTYPE = 280,
- LFCONST = 281,
- LDCONST = 282,
- LCONST = 283,
- LLCONST = 284,
- LUCONST = 285,
- LULCONST = 286,
- LVLCONST = 287,
- LUVLCONST = 288,
- LSTRING = 289,
- LLSTRING = 290,
- LAUTO = 291,
- LBREAK = 292,
- LCASE = 293,
- LCHAR = 294,
- LCONTINUE = 295,
- LDEFAULT = 296,
- LDO = 297,
- LDOUBLE = 298,
- LELSE = 299,
- LEXTERN = 300,
- LFLOAT = 301,
- LFOR = 302,
- LGOTO = 303,
- LIF = 304,
- LINT = 305,
- LLONG = 306,
- LPREFETCH = 307,
- LREGISTER = 308,
- LRETURN = 309,
- LSHORT = 310,
- LSIZEOF = 311,
- LUSED = 312,
- LSTATIC = 313,
- LSTRUCT = 314,
- LSWITCH = 315,
- LTYPEDEF = 316,
- LTYPESTR = 317,
- LUNION = 318,
- LUNSIGNED = 319,
- LWHILE = 320,
- LVOID = 321,
- LENUM = 322,
- LSIGNED = 323,
- LCONSTNT = 324,
- LVOLATILE = 325,
- LSET = 326,
- LSIGNOF = 327,
- LRESTRICT = 328,
- LINLINE = 329
- };
-#endif
-/* Tokens. */
-#define LORE 258
-#define LXORE 259
-#define LANDE 260
-#define LLSHE 261
-#define LRSHE 262
-#define LMDE 263
-#define LDVE 264
-#define LMLE 265
-#define LME 266
-#define LPE 267
-#define LOROR 268
-#define LANDAND 269
-#define LNE 270
-#define LEQ 271
-#define LGE 272
-#define LLE 273
-#define LRSH 274
-#define LLSH 275
-#define LMG 276
-#define LPP 277
-#define LMM 278
-#define LNAME 279
-#define LTYPE 280
-#define LFCONST 281
-#define LDCONST 282
-#define LCONST 283
-#define LLCONST 284
-#define LUCONST 285
-#define LULCONST 286
-#define LVLCONST 287
-#define LUVLCONST 288
-#define LSTRING 289
-#define LLSTRING 290
-#define LAUTO 291
-#define LBREAK 292
-#define LCASE 293
-#define LCHAR 294
-#define LCONTINUE 295
-#define LDEFAULT 296
-#define LDO 297
-#define LDOUBLE 298
-#define LELSE 299
-#define LEXTERN 300
-#define LFLOAT 301
-#define LFOR 302
-#define LGOTO 303
-#define LIF 304
-#define LINT 305
-#define LLONG 306
-#define LPREFETCH 307
-#define LREGISTER 308
-#define LRETURN 309
-#define LSHORT 310
-#define LSIZEOF 311
-#define LUSED 312
-#define LSTATIC 313
-#define LSTRUCT 314
-#define LSWITCH 315
-#define LTYPEDEF 316
-#define LTYPESTR 317
-#define LUNION 318
-#define LUNSIGNED 319
-#define LWHILE 320
-#define LVOID 321
-#define LENUM 322
-#define LSIGNED 323
-#define LCONSTNT 324
-#define LVOLATILE 325
-#define LSET 326
-#define LSIGNOF 327
-#define LRESTRICT 328
-#define LINLINE 329
-
-
-
-
-/* Copy the first part of user declarations. */
-#line 31 "cc.y"
-
-#include <u.h>
-#include <stdio.h> /* if we don't, bison will, and cc.h re-#defines getc */
-#include "cc.h"
-
-
-/* Enabling traces. */
-#ifndef YYDEBUG
-# define YYDEBUG 0
-#endif
-
-/* Enabling verbose error messages. */
-#ifdef YYERROR_VERBOSE
-# undef YYERROR_VERBOSE
-# define YYERROR_VERBOSE 1
-#else
-# define YYERROR_VERBOSE 0
-#endif
-
-/* Enabling the token table. */
-#ifndef YYTOKEN_TABLE
-# define YYTOKEN_TABLE 0
-#endif
-
-#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef union YYSTYPE
-#line 36 "cc.y"
-{
- Node* node;
- Sym* sym;
- Type* type;
- struct
- {
- Type* t;
- uchar c;
- } tycl;
- struct
- {
- Type* t1;
- Type* t2;
- Type* t3;
- uchar c;
- } tyty;
- struct
- {
- char* s;
- int32 l;
- } sval;
- int32 lval;
- double dval;
- vlong vval;
-}
-/* Line 193 of yacc.c. */
-#line 276 "y.tab.c"
- YYSTYPE;
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
-# define YYSTYPE_IS_DECLARED 1
-# define YYSTYPE_IS_TRIVIAL 1
-#endif
-
-
-
-/* Copy the second part of user declarations. */
-
-
-/* Line 216 of yacc.c. */
-#line 289 "y.tab.c"
-
-#ifdef short
-# undef short
-#endif
-
-#ifdef YYTYPE_UINT8
-typedef YYTYPE_UINT8 yytype_uint8;
-#else
-typedef unsigned char yytype_uint8;
-#endif
-
-#ifdef YYTYPE_INT8
-typedef YYTYPE_INT8 yytype_int8;
-#elif (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-typedef signed char yytype_int8;
-#else
-typedef short int yytype_int8;
-#endif
-
-#ifdef YYTYPE_UINT16
-typedef YYTYPE_UINT16 yytype_uint16;
-#else
-typedef unsigned short int yytype_uint16;
-#endif
-
-#ifdef YYTYPE_INT16
-typedef YYTYPE_INT16 yytype_int16;
-#else
-typedef short int yytype_int16;
-#endif
-
-#ifndef YYSIZE_T
-# ifdef __SIZE_TYPE__
-# define YYSIZE_T __SIZE_TYPE__
-# elif defined size_t
-# define YYSIZE_T size_t
-# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
-# define YYSIZE_T size_t
-# else
-# define YYSIZE_T unsigned int
-# endif
-#endif
-
-#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
-
-#ifndef YY_
-# if defined YYENABLE_NLS && YYENABLE_NLS
-# if ENABLE_NLS
-# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
-# define YY_(msgid) dgettext ("bison-runtime", msgid)
-# endif
-# endif
-# ifndef YY_
-# define YY_(msgid) msgid
-# endif
-#endif
-
-/* Suppress unused-variable warnings by "using" E. */
-#if ! defined lint || defined __GNUC__
-# define YYUSE(e) ((void) (e))
-#else
-# define YYUSE(e) /* empty */
-#endif
-
-/* Identity function, used to suppress warnings about constant conditions. */
-#ifndef lint
-# define YYID(n) (n)
-#else
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static int
-YYID (int i)
-#else
-static int
-YYID (i)
- int i;
-#endif
-{
- return i;
-}
-#endif
-
-#if ! defined yyoverflow || YYERROR_VERBOSE
-
-/* The parser invokes alloca or malloc; define the necessary symbols. */
-
-# ifdef YYSTACK_USE_ALLOCA
-# if YYSTACK_USE_ALLOCA
-# ifdef __GNUC__
-# define YYSTACK_ALLOC __builtin_alloca
-# elif defined __BUILTIN_VA_ARG_INCR
-# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
-# elif defined _AIX
-# define YYSTACK_ALLOC __alloca
-# elif defined _MSC_VER
-# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
-# define alloca _alloca
-# else
-# define YYSTACK_ALLOC alloca
-# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-# ifndef _STDLIB_H
-# define _STDLIB_H 1
-# endif
-# endif
-# endif
-# endif
-# endif
-
-# ifdef YYSTACK_ALLOC
- /* Pacify GCC's `empty if-body' warning. */
-# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
-# ifndef YYSTACK_ALLOC_MAXIMUM
- /* The OS might guarantee only one guard page at the bottom of the stack,
- and a page size can be as small as 4096 bytes. So we cannot safely
- invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
- to allow for a few compiler-allocated temporary stack slots. */
-# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
-# endif
-# else
-# define YYSTACK_ALLOC YYMALLOC
-# define YYSTACK_FREE YYFREE
-# ifndef YYSTACK_ALLOC_MAXIMUM
-# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
-# endif
-# if (defined __cplusplus && ! defined _STDLIB_H \
- && ! ((defined YYMALLOC || defined malloc) \
- && (defined YYFREE || defined free)))
-# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-# ifndef _STDLIB_H
-# define _STDLIB_H 1
-# endif
-# endif
-# ifndef YYMALLOC
-# define YYMALLOC malloc
-# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
-# endif
-# endif
-# ifndef YYFREE
-# define YYFREE free
-# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-void free (void *); /* INFRINGES ON USER NAME SPACE */
-# endif
-# endif
-# endif
-#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
-
-
-#if (! defined yyoverflow \
- && (! defined __cplusplus \
- || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
-
-/* A type that is properly aligned for any stack member. */
-union yyalloc
-{
- yytype_int16 yyss;
- YYSTYPE yyvs;
- };
-
-/* The size of the maximum gap between one aligned stack and the next. */
-# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
-
-/* The size of an array large to enough to hold all stacks, each with
- N elements. */
-# define YYSTACK_BYTES(N) \
- ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
- + YYSTACK_GAP_MAXIMUM)
-
-/* Copy COUNT objects from FROM to TO. The source and destination do
- not overlap. */
-# ifndef YYCOPY
-# if defined __GNUC__ && 1 < __GNUC__
-# define YYCOPY(To, From, Count) \
- __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
-# else
-# define YYCOPY(To, From, Count) \
- do \
- { \
- YYSIZE_T yyi; \
- for (yyi = 0; yyi < (Count); yyi++) \
- (To)[yyi] = (From)[yyi]; \
- } \
- while (YYID (0))
-# endif
-# endif
-
-/* Relocate STACK from its old location to the new one. The
- local variables YYSIZE and YYSTACKSIZE give the old and new number of
- elements in the stack, and YYPTR gives the new location of the
- stack. Advance YYPTR to a properly aligned location for the next
- stack. */
-# define YYSTACK_RELOCATE(Stack) \
- do \
- { \
- YYSIZE_T yynewbytes; \
- YYCOPY (&yyptr->Stack, Stack, yysize); \
- Stack = &yyptr->Stack; \
- yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
- yyptr += yynewbytes / sizeof (*yyptr); \
- } \
- while (YYID (0))
-
-#endif
-
-/* YYFINAL -- State number of the termination state. */
-#define YYFINAL 2
-/* YYLAST -- Last index in YYTABLE. */
-#define YYLAST 1188
-
-/* YYNTOKENS -- Number of terminals. */
-#define YYNTOKENS 99
-/* YYNNTS -- Number of nonterminals. */
-#define YYNNTS 75
-/* YYNRULES -- Number of rules. */
-#define YYNRULES 247
-/* YYNRULES -- Number of states. */
-#define YYNSTATES 417
-
-/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
-#define YYUNDEFTOK 2
-#define YYMAXUTOK 329
-
-#define YYTRANSLATE(YYX) \
- ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
-
-/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
-static const yytype_uint8 yytranslate[] =
-{
- 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 97, 2, 2, 2, 35, 22, 2,
- 38, 93, 33, 31, 4, 32, 36, 34, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 17, 3,
- 25, 5, 26, 16, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 37, 2, 94, 21, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 95, 20, 96, 98, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 1, 2, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 18, 19,
- 23, 24, 27, 28, 29, 30, 39, 40, 41, 42,
- 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
- 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
- 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
- 83, 84, 85, 86, 87, 88, 89, 90, 91, 92
-};
-
-#if YYDEBUG
-/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
- YYRHS. */
-static const yytype_uint16 yyprhs[] =
-{
- 0, 0, 3, 4, 7, 10, 14, 15, 16, 23,
- 25, 26, 31, 35, 37, 41, 43, 47, 52, 57,
- 60, 64, 66, 67, 72, 76, 77, 82, 84, 88,
- 89, 94, 95, 101, 102, 104, 106, 110, 112, 116,
- 119, 120, 122, 125, 129, 131, 133, 138, 143, 146,
- 150, 154, 156, 160, 164, 167, 170, 173, 177, 179,
- 182, 184, 186, 189, 190, 192, 194, 197, 200, 204,
- 208, 212, 213, 216, 219, 221, 224, 228, 231, 234,
- 237, 239, 242, 244, 247, 250, 251, 254, 260, 268,
- 269, 280, 286, 294, 298, 304, 307, 310, 314, 320,
- 326, 332, 333, 335, 336, 338, 340, 342, 346, 348,
- 352, 356, 360, 364, 368, 372, 376, 380, 384, 388,
- 392, 396, 400, 404, 408, 412, 416, 420, 426, 430,
- 434, 438, 442, 446, 450, 454, 458, 462, 466, 470,
- 472, 478, 486, 488, 491, 494, 497, 500, 503, 506,
- 509, 512, 515, 518, 522, 528, 534, 539, 544, 548,
- 552, 555, 558, 560, 562, 564, 566, 568, 570, 572,
- 574, 576, 578, 580, 582, 585, 587, 590, 591, 593,
- 595, 599, 600, 605, 606, 608, 610, 612, 614, 617,
- 620, 624, 627, 631, 633, 635, 638, 639, 644, 647,
- 650, 651, 656, 659, 662, 663, 664, 672, 673, 679,
- 681, 683, 686, 687, 690, 692, 694, 696, 698, 701,
- 703, 705, 707, 711, 714, 718, 720, 722, 724, 726,
- 728, 730, 732, 734, 736, 738, 740, 742, 744, 746,
- 748, 750, 752, 754, 756, 758, 760, 762
-};
-
-/* YYRHS -- A `-1'-separated list of the rules' RHS. */
-static const yytype_int16 yyrhs[] =
-{
- 100, 0, -1, -1, 100, 101, -1, 152, 3, -1,
- 152, 104, 3, -1, -1, -1, 152, 106, 102, 111,
- 103, 129, -1, 106, -1, -1, 106, 105, 5, 123,
- -1, 104, 4, 104, -1, 107, -1, 33, 163, 106,
- -1, 172, -1, 38, 106, 93, -1, 107, 38, 127,
- 93, -1, 107, 37, 139, 94, -1, 155, 3, -1,
- 155, 109, 3, -1, 106, -1, -1, 106, 110, 5,
- 123, -1, 109, 4, 109, -1, -1, 111, 155, 112,
- 3, -1, 106, -1, 112, 4, 112, -1, -1, 154,
- 114, 116, 3, -1, -1, 113, 154, 115, 116, 3,
- -1, -1, 117, -1, 118, -1, 117, 4, 117, -1,
- 106, -1, 172, 17, 140, -1, 17, 140, -1, -1,
- 120, -1, 33, 163, -1, 33, 163, 120, -1, 121,
- -1, 122, -1, 121, 38, 127, 93, -1, 121, 37,
- 139, 94, -1, 38, 93, -1, 37, 139, 94, -1,
- 38, 120, 93, -1, 142, -1, 95, 126, 96, -1,
- 37, 140, 94, -1, 36, 173, -1, 124, 5, -1,
- 123, 4, -1, 125, 123, 4, -1, 124, -1, 125,
- 124, -1, 125, -1, 123, -1, 125, 123, -1, -1,
- 128, -1, 171, -1, 154, 119, -1, 154, 106, -1,
- 36, 36, 36, -1, 128, 4, 128, -1, 95, 130,
- 96, -1, -1, 130, 108, -1, 130, 133, -1, 132,
- -1, 131, 132, -1, 56, 142, 17, -1, 59, 17,
- -1, 42, 17, -1, 1, 3, -1, 135, -1, 131,
- 135, -1, 138, -1, 155, 109, -1, 138, 3, -1,
- -1, 136, 129, -1, 67, 38, 141, 93, 133, -1,
- 67, 38, 141, 93, 133, 62, 133, -1, -1, 137,
- 65, 38, 134, 3, 138, 3, 138, 93, 133, -1,
- 83, 38, 141, 93, 133, -1, 60, 133, 83, 38,
- 141, 93, 3, -1, 72, 138, 3, -1, 78, 38,
- 141, 93, 133, -1, 55, 3, -1, 58, 3, -1,
- 66, 173, 3, -1, 75, 38, 148, 93, 3, -1,
- 70, 38, 148, 93, 3, -1, 89, 38, 148, 93,
- 3, -1, -1, 141, -1, -1, 140, -1, 142, -1,
- 142, -1, 141, 4, 141, -1, 143, -1, 142, 33,
- 142, -1, 142, 34, 142, -1, 142, 35, 142, -1,
- 142, 31, 142, -1, 142, 32, 142, -1, 142, 29,
- 142, -1, 142, 30, 142, -1, 142, 25, 142, -1,
- 142, 26, 142, -1, 142, 28, 142, -1, 142, 27,
- 142, -1, 142, 24, 142, -1, 142, 23, 142, -1,
- 142, 22, 142, -1, 142, 21, 142, -1, 142, 20,
- 142, -1, 142, 19, 142, -1, 142, 18, 142, -1,
- 142, 16, 141, 17, 142, -1, 142, 5, 142, -1,
- 142, 15, 142, -1, 142, 14, 142, -1, 142, 13,
- 142, -1, 142, 12, 142, -1, 142, 11, 142, -1,
- 142, 9, 142, -1, 142, 10, 142, -1, 142, 8,
- 142, -1, 142, 7, 142, -1, 142, 6, 142, -1,
- 144, -1, 38, 154, 119, 93, 143, -1, 38, 154,
- 119, 93, 95, 126, 96, -1, 145, -1, 33, 143,
- -1, 22, 143, -1, 31, 143, -1, 32, 143, -1,
- 97, 143, -1, 98, 143, -1, 40, 143, -1, 41,
- 143, -1, 74, 144, -1, 90, 144, -1, 38, 141,
- 93, -1, 74, 38, 154, 119, 93, -1, 90, 38,
- 154, 119, 93, -1, 145, 38, 148, 93, -1, 145,
- 37, 141, 94, -1, 145, 39, 173, -1, 145, 36,
- 173, -1, 145, 40, -1, 145, 41, -1, 171, -1,
- 46, -1, 47, -1, 48, -1, 49, -1, 45, -1,
- 44, -1, 50, -1, 51, -1, 146, -1, 147, -1,
- 52, -1, 146, 52, -1, 53, -1, 147, 53, -1,
- -1, 149, -1, 142, -1, 149, 4, 149, -1, -1,
- 95, 151, 113, 96, -1, -1, 155, -1, 156, -1,
- 168, -1, 165, -1, 156, 162, -1, 168, 162, -1,
- 165, 156, 163, -1, 165, 168, -1, 165, 168, 162,
- -1, 153, -1, 153, -1, 77, 173, -1, -1, 77,
- 173, 157, 150, -1, 77, 150, -1, 81, 173, -1,
- -1, 81, 173, 158, 150, -1, 81, 150, -1, 85,
- 173, -1, -1, -1, 85, 173, 159, 95, 160, 167,
- 96, -1, -1, 85, 95, 161, 167, 96, -1, 43,
- -1, 164, -1, 162, 164, -1, -1, 163, 170, -1,
- 168, -1, 170, -1, 169, -1, 166, -1, 165, 166,
- -1, 170, -1, 169, -1, 42, -1, 42, 5, 142,
- -1, 167, 4, -1, 167, 4, 167, -1, 57, -1,
- 73, -1, 68, -1, 69, -1, 86, -1, 82, -1,
- 64, -1, 61, -1, 84, -1, 54, -1, 76, -1,
- 63, -1, 79, -1, 80, -1, 71, -1, 92, -1,
- 87, -1, 88, -1, 91, -1, 42, -1, 173, -1,
- 42, -1, 43, -1
-};
-
-/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
-static const yytype_uint16 yyrline[] =
-{
- 0, 101, 101, 102, 108, 112, 114, 128, 113, 143,
- 148, 147, 155, 158, 159, 166, 167, 171, 175, 184,
- 188, 194, 200, 199, 211, 224, 225, 228, 232, 239,
- 238, 244, 243, 250, 254, 257, 261, 264, 269, 273,
- 282, 285, 288, 293, 298, 301, 302, 306, 312, 316,
- 320, 326, 327, 333, 337, 342, 345, 346, 350, 351,
- 357, 358, 359, 365, 368, 375, 376, 381, 386, 390,
- 396, 406, 409, 413, 419, 420, 426, 430, 434, 440,
- 444, 445, 451, 452, 458, 459, 459, 470, 476, 484,
- 484, 495, 499, 503, 508, 522, 526, 530, 534, 538,
- 542, 548, 551, 554, 557, 560, 567, 568, 574, 575,
- 579, 583, 587, 591, 595, 599, 603, 607, 611, 615,
- 619, 623, 627, 631, 635, 639, 643, 647, 651, 655,
- 659, 663, 667, 671, 675, 679, 683, 687, 691, 697,
- 698, 705, 713, 714, 718, 722, 726, 730, 734, 738,
- 742, 746, 750, 756, 760, 766, 772, 780, 784, 789,
- 794, 798, 802, 803, 810, 817, 824, 831, 838, 845,
- 852, 859, 860, 863, 873, 891, 901, 919, 922, 925,
- 926, 933, 932, 955, 959, 962, 967, 972, 978, 986,
- 992, 998, 1004, 1012, 1020, 1027, 1033, 1032, 1044, 1053,
- 1059, 1058, 1070, 1078, 1087, 1091, 1086, 1108, 1107, 1116,
- 1122, 1123, 1129, 1132, 1138, 1139, 1140, 1143, 1144, 1150,
- 1151, 1154, 1158, 1162, 1163, 1166, 1167, 1168, 1169, 1170,
- 1171, 1172, 1173, 1174, 1177, 1178, 1179, 1180, 1181, 1182,
- 1183, 1186, 1187, 1188, 1191, 1206, 1218, 1219
-};
-#endif
-
-#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
-/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
- First, the terminals, then, starting at YYNTOKENS, nonterminals. */
-static const char *const yytname[] =
-{
- "$end", "error", "$undefined", "';'", "','", "'='", "LORE", "LXORE",
- "LANDE", "LLSHE", "LRSHE", "LMDE", "LDVE", "LMLE", "LME", "LPE", "'?'",
- "':'", "LOROR", "LANDAND", "'|'", "'^'", "'&'", "LNE", "LEQ", "'<'",
- "'>'", "LGE", "LLE", "LRSH", "LLSH", "'+'", "'-'", "'*'", "'/'", "'%'",
- "'.'", "'['", "'('", "LMG", "LPP", "LMM", "LNAME", "LTYPE", "LFCONST",
- "LDCONST", "LCONST", "LLCONST", "LUCONST", "LULCONST", "LVLCONST",
- "LUVLCONST", "LSTRING", "LLSTRING", "LAUTO", "LBREAK", "LCASE", "LCHAR",
- "LCONTINUE", "LDEFAULT", "LDO", "LDOUBLE", "LELSE", "LEXTERN", "LFLOAT",
- "LFOR", "LGOTO", "LIF", "LINT", "LLONG", "LPREFETCH", "LREGISTER",
- "LRETURN", "LSHORT", "LSIZEOF", "LUSED", "LSTATIC", "LSTRUCT", "LSWITCH",
- "LTYPEDEF", "LTYPESTR", "LUNION", "LUNSIGNED", "LWHILE", "LVOID",
- "LENUM", "LSIGNED", "LCONSTNT", "LVOLATILE", "LSET", "LSIGNOF",
- "LRESTRICT", "LINLINE", "')'", "']'", "'{'", "'}'", "'!'", "'~'",
- "$accept", "prog", "xdecl", "@1", "@2", "xdlist", "@3", "xdecor",
- "xdecor2", "adecl", "adlist", "@4", "pdecl", "pdlist", "edecl", "@5",
- "@6", "zedlist", "edlist", "edecor", "abdecor", "abdecor1", "abdecor2",
- "abdecor3", "init", "qual", "qlist", "ilist", "zarglist", "arglist",
- "block", "slist", "labels", "label", "stmnt", "forexpr", "ulstmnt", "@7",
- "@8", "zcexpr", "zexpr", "lexpr", "cexpr", "expr", "xuexpr", "uexpr",
- "pexpr", "string", "lstring", "zelist", "elist", "sbody", "@9",
- "zctlist", "types", "tlist", "ctlist", "complex", "@10", "@11", "@12",
- "@13", "@14", "gctnlist", "zgnlist", "gctname", "gcnlist", "gcname",
- "enum", "tname", "cname", "gname", "name", "tag", "ltag", 0
-};
-#endif
-
-# ifdef YYPRINT
-/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
- token YYLEX-NUM. */
-static const yytype_uint16 yytoknum[] =
-{
- 0, 256, 257, 59, 44, 61, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 63, 58, 268, 269,
- 124, 94, 38, 270, 271, 60, 62, 272, 273, 274,
- 275, 43, 45, 42, 47, 37, 46, 91, 40, 276,
- 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
- 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
- 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
- 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
- 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
- 327, 328, 329, 41, 93, 123, 125, 33, 126
-};
-# endif
-
-/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
-static const yytype_uint8 yyr1[] =
-{
- 0, 99, 100, 100, 101, 101, 102, 103, 101, 104,
- 105, 104, 104, 106, 106, 107, 107, 107, 107, 108,
- 108, 109, 110, 109, 109, 111, 111, 112, 112, 114,
- 113, 115, 113, 116, 116, 117, 117, 118, 118, 118,
- 119, 119, 120, 120, 120, 121, 121, 121, 122, 122,
- 122, 123, 123, 124, 124, 124, 125, 125, 125, 125,
- 126, 126, 126, 127, 127, 128, 128, 128, 128, 128,
- 129, 130, 130, 130, 131, 131, 132, 132, 132, 133,
- 133, 133, 134, 134, 135, 136, 135, 135, 135, 137,
- 135, 135, 135, 135, 135, 135, 135, 135, 135, 135,
- 135, 138, 138, 139, 139, 140, 141, 141, 142, 142,
- 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
- 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
- 142, 142, 142, 142, 142, 142, 142, 142, 142, 143,
- 143, 143, 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 145, 145, 145, 145, 145, 145, 145,
- 145, 145, 145, 145, 145, 145, 145, 145, 145, 145,
- 145, 145, 145, 146, 146, 147, 147, 148, 148, 149,
- 149, 151, 150, 152, 152, 153, 153, 153, 153, 153,
- 153, 153, 153, 154, 155, 156, 157, 156, 156, 156,
- 158, 156, 156, 156, 159, 160, 156, 161, 156, 156,
- 162, 162, 163, 163, 164, 164, 164, 165, 165, 166,
- 166, 167, 167, 167, 167, 168, 168, 168, 168, 168,
- 168, 168, 168, 168, 169, 169, 169, 169, 169, 169,
- 169, 170, 170, 170, 171, 172, 173, 173
-};
-
-/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
-static const yytype_uint8 yyr2[] =
-{
- 0, 2, 0, 2, 2, 3, 0, 0, 6, 1,
- 0, 4, 3, 1, 3, 1, 3, 4, 4, 2,
- 3, 1, 0, 4, 3, 0, 4, 1, 3, 0,
- 4, 0, 5, 0, 1, 1, 3, 1, 3, 2,
- 0, 1, 2, 3, 1, 1, 4, 4, 2, 3,
- 3, 1, 3, 3, 2, 2, 2, 3, 1, 2,
- 1, 1, 2, 0, 1, 1, 2, 2, 3, 3,
- 3, 0, 2, 2, 1, 2, 3, 2, 2, 2,
- 1, 2, 1, 2, 2, 0, 2, 5, 7, 0,
- 10, 5, 7, 3, 5, 2, 2, 3, 5, 5,
- 5, 0, 1, 0, 1, 1, 1, 3, 1, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 5, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
- 5, 7, 1, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 3, 5, 5, 4, 4, 3, 3,
- 2, 2, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 2, 1, 2, 0, 1, 1,
- 3, 0, 4, 0, 1, 1, 1, 1, 2, 2,
- 3, 2, 3, 1, 1, 2, 0, 4, 2, 2,
- 0, 4, 2, 2, 0, 0, 7, 0, 5, 1,
- 1, 2, 0, 2, 1, 1, 1, 1, 2, 1,
- 1, 1, 3, 2, 3, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1
-};
-
-/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
- STATE-NUM when YYTABLE doesn't specify something else to do. Zero
- means the default is an error. */
-static const yytype_uint8 yydefact[] =
-{
- 2, 183, 1, 209, 234, 225, 232, 236, 231, 227,
- 228, 239, 226, 235, 0, 237, 238, 0, 230, 233,
- 0, 229, 241, 242, 243, 240, 3, 0, 194, 184,
- 185, 187, 217, 186, 220, 219, 246, 247, 181, 198,
- 195, 202, 199, 207, 203, 4, 212, 0, 0, 6,
- 13, 15, 245, 188, 210, 214, 216, 215, 212, 218,
- 191, 189, 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 25, 0, 103, 63, 211, 190, 192, 0, 193,
- 29, 197, 201, 221, 0, 205, 14, 213, 16, 12,
- 9, 7, 0, 0, 0, 0, 0, 0, 0, 0,
- 244, 168, 167, 163, 164, 165, 166, 169, 170, 173,
- 175, 0, 0, 0, 0, 0, 104, 105, 108, 139,
- 142, 171, 172, 162, 0, 0, 64, 40, 65, 182,
- 31, 33, 0, 223, 208, 0, 0, 0, 0, 11,
- 51, 144, 145, 146, 143, 0, 106, 40, 149, 150,
- 0, 151, 0, 152, 147, 148, 18, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 177,
- 0, 160, 161, 174, 176, 0, 17, 0, 212, 103,
- 0, 67, 66, 41, 44, 45, 33, 0, 37, 0,
- 34, 35, 15, 222, 224, 0, 71, 8, 27, 0,
- 0, 0, 61, 58, 60, 0, 0, 153, 212, 0,
- 0, 40, 40, 128, 138, 137, 136, 134, 135, 133,
- 132, 131, 130, 129, 0, 126, 125, 124, 123, 122,
- 121, 120, 116, 117, 119, 118, 114, 115, 112, 113,
- 109, 110, 111, 159, 0, 179, 0, 178, 158, 68,
- 69, 42, 0, 48, 0, 103, 63, 0, 39, 30,
- 0, 0, 206, 0, 26, 0, 54, 0, 56, 55,
- 62, 59, 52, 107, 42, 0, 0, 0, 0, 157,
- 156, 0, 43, 49, 50, 0, 0, 32, 36, 38,
- 0, 244, 0, 0, 0, 0, 0, 0, 0, 0,
- 101, 0, 0, 0, 0, 70, 72, 85, 74, 73,
- 80, 0, 0, 0, 102, 0, 28, 53, 57, 0,
- 140, 154, 155, 127, 180, 47, 46, 79, 78, 95,
- 0, 96, 77, 0, 0, 0, 177, 0, 177, 0,
- 0, 177, 75, 81, 86, 0, 84, 19, 21, 0,
- 0, 76, 0, 97, 0, 0, 93, 0, 0, 0,
- 0, 101, 0, 20, 0, 141, 0, 0, 0, 0,
- 0, 0, 0, 0, 82, 0, 0, 24, 0, 87,
- 99, 98, 94, 91, 100, 101, 83, 23, 0, 0,
- 0, 92, 88, 101, 0, 0, 90
-};
-
-/* YYDEFGOTO[NTERM-NUM]. */
-static const yytype_int16 yydefgoto[] =
-{
- -1, 1, 26, 71, 136, 48, 72, 208, 50, 326,
- 369, 382, 91, 219, 78, 131, 206, 209, 210, 211,
- 202, 203, 204, 205, 222, 223, 224, 225, 125, 126,
- 217, 283, 327, 328, 329, 393, 330, 331, 332, 333,
- 115, 116, 334, 146, 118, 119, 120, 121, 122, 266,
- 267, 39, 62, 27, 79, 127, 29, 30, 63, 64,
- 66, 135, 65, 53, 67, 54, 31, 32, 84, 33,
- 34, 35, 123, 51, 52
-};
-
-/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
- STATE-NUM. */
-#define YYPACT_NINF -331
-static const yytype_int16 yypact[] =
-{
- -331, 548, -331, -331, -331, -331, -331, -331, -331, -331,
- -331, -331, -331, -331, -3, -331, -331, -3, -331, -331,
- 149, -331, -331, -331, -331, -331, -331, 264, -331, -331,
- 965, 929, -331, 965, -331, -331, -331, -331, -331, -331,
- -75, -331, -72, -331, -60, -331, -331, 307, 60, 270,
- 156, -331, -331, 965, -331, -331, -331, -331, -331, -331,
- 965, 965, 929, -44, -44, 29, -15, 199, -10, -331,
- 307, -331, 83, 756, 849, -331, 140, 965, 889, -331,
- -331, -331, -331, 86, 12, -331, -331, -331, -331, -331,
- 90, 929, 686, 756, 756, 756, 756, 615, 756, 756,
- -331, -331, -331, -331, -331, -331, -331, -331, -331, -331,
- -331, 791, 826, 756, 756, 9, -331, 1084, -331, -331,
- 708, 54, 57, -331, 110, 56, 152, 310, -331, -331,
- -331, 279, 756, 29, -331, 29, 63, 307, 165, -331,
- 1084, -331, -331, -331, -331, 30, 1084, 44, -331, -331,
- 615, -331, 615, -331, -331, -331, -331, 756, 756, 756,
- 756, 756, 756, 756, 756, 756, 756, 756, 756, 756,
- 756, 756, 756, 756, 756, 756, 756, 756, 756, 756,
- 756, 756, 756, 756, 756, 756, 756, 157, 756, 756,
- 157, -331, -331, -331, -331, 115, -331, 849, -331, 756,
- 128, -331, -331, -331, 182, -331, 279, 756, -331, 164,
- 200, -331, 208, 1084, -331, 13, -331, -331, -331, 262,
- 157, 756, 225, 228, 165, 73, 756, -331, -331, -7,
- 150, 44, 44, 1084, 1084, 1084, 1084, 1084, 1084, 1084,
- 1084, 1084, 1084, 1084, 28, 304, 1100, 1115, 1129, 1142,
- 1153, 1153, 433, 433, 433, 433, 333, 333, 265, 265,
- -331, -331, -331, -331, 8, 1084, 153, 236, -331, -331,
- -331, 147, 158, -331, 161, 756, 849, 247, -331, -331,
- 279, 756, -331, 341, -331, 307, -331, 175, -331, -331,
- 254, 228, -331, -331, 135, 721, 188, 190, 756, -331,
- -331, 756, -331, -331, -331, 191, 211, -331, -331, -331,
- 298, 301, 338, 756, 343, 339, 439, 157, 319, 321,
- 756, 322, 323, 324, 332, -331, -331, 509, -331, -331,
- -331, 63, 306, 372, 373, 277, -331, -331, -331, 165,
- -331, -331, -331, 425, -331, -331, -331, -331, -331, -331,
- 1053, -331, -331, 293, 375, 756, 756, 400, 756, 756,
- 756, 756, -331, -331, -331, 396, -331, -331, 430, 285,
- 377, -331, 431, -331, 55, 381, -331, 382, 62, 64,
- 383, 615, 473, -331, 307, -331, 756, 439, 479, 490,
- 439, 439, 493, 497, -331, 307, 686, -331, 66, 440,
- -331, -331, -331, -331, -331, 756, 499, -331, 498, 439,
- 504, -331, -331, 756, 415, 439, -331
-};
-
-/* YYPGOTO[NTERM-NUM]. */
-static const yytype_int16 yypgoto[] =
-{
- -331, -331, -331, -331, -331, 445, -331, -26, -331, -331,
- -330, -331, -331, 233, -331, -331, -331, 313, 230, -331,
- -132, -187, -331, -331, -82, 292, -331, 181, 245, 326,
- 193, -331, -331, 198, -227, -331, 203, -331, -331, -309,
- -181, -183, -83, -45, -38, 243, -331, -331, -331, -175,
- 226, 10, -331, -331, -1, 0, -88, 495, -331, -331,
- -331, -331, -331, -14, -51, -28, -331, 501, -85, 218,
- 231, -24, -52, -127, -12
-};
-
-/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
- positive, shift that token. If negative, reduce the rule which
- number is the opposite. If zero, do what YYDEFACT says.
- If YYTABLE_NINF, syntax error. */
-#define YYTABLE_NINF -205
-static const yytype_int16 yytable[] =
-{
- 28, 49, 40, 137, 212, 42, 57, 76, 44, 57,
- 139, 357, 226, 274, 145, 230, 133, 133, 272, 61,
- -196, 68, 128, -200, 278, 75, 228, 41, 117, 57,
- 199, 229, 226, 75, 226, -204, 57, 57, 287, 36,
- 37, 86, 274, 87, 90, 298, 77, 140, 214, 75,
- 215, 38, 87, 57, 397, 141, 142, 143, 144, 226,
- 148, 149, 80, 69, 70, 406, 226, 145, 226, 145,
- 226, 83, 394, 81, 82, 154, 155, 228, 130, 212,
- 85, 199, 229, 88, 302, 244, 273, 213, 92, 353,
- 28, 132, 38, 140, 305, -10, 410, 147, 309, 296,
- 297, 201, 299, 156, 414, 264, 193, 302, 134, 282,
- 194, 218, 233, 234, 235, 236, 237, 238, 239, 240,
- 241, 242, 243, 227, 245, 246, 247, 248, 249, 250,
- 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
- 261, 262, 290, 293, 265, 128, 195, 271, 387, 196,
- 231, 269, 232, 212, 117, 390, 197, 391, 216, 408,
- 399, 198, 117, 402, 403, 199, 200, 279, 228, 292,
- 36, 37, 199, 229, 68, 263, 117, 294, 268, 140,
- 198, 375, 412, 377, 199, 200, 380, 93, 416, 36,
- 37, 36, 37, 73, 74, 335, 94, 95, 96, 36,
- 37, 220, 221, 97, 280, 98, 99, 100, 286, 101,
- 102, 103, 104, 105, 106, 107, 108, 109, 110, 275,
- 276, 273, 22, 23, 128, 281, 24, 22, 23, 288,
- 117, 24, 46, 289, 22, 23, 117, 47, 24, 111,
- 301, 36, 37, 295, 43, 86, 300, 87, 55, 60,
- 307, 55, 303, 343, 304, 112, 265, 340, 338, 218,
- 138, 56, 113, 114, 56, 284, 285, 45, 350, 337,
- 87, 55, 374, -9, -9, -10, 378, 379, 55, 55,
- 367, 341, 28, 342, 56, 345, 22, 23, 383, 384,
- 24, 56, 56, 395, 140, 55, 207, 46, 184, 185,
- 186, 347, 47, 398, 346, 354, 36, 37, 56, 368,
- 46, 265, 46, 265, 407, 47, 265, 47, 348, 36,
- 37, 36, 37, 170, 171, 172, 173, 174, 175, 176,
- 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
- 46, 349, 310, 198, -101, 47, 351, 199, 200, 36,
- 37, 140, 36, 37, 151, 153, 352, 355, 368, 356,
- 358, 359, 360, 93, 182, 183, 184, 185, 186, 368,
- 361, 365, 94, 95, 96, 366, 372, 226, 373, 97,
- 28, 98, 99, 311, 3, 101, 102, 103, 104, 105,
- 106, 107, 108, 109, 110, 4, 312, 313, 5, 314,
- 315, 316, 6, 376, 7, 8, -89, 317, 318, 9,
- 10, 319, 11, 320, 12, 111, 321, 13, 14, 322,
- 15, 16, 17, 18, 323, 19, 20, 21, 22, 23,
- 324, 112, 24, 25, 381, -22, -85, 325, 113, 114,
- 310, 168, -101, 169, 170, 171, 172, 173, 174, 175,
- 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
- 186, 93, 180, 181, 182, 183, 184, 185, 186, 386,
- 94, 95, 96, 385, 388, 389, 392, 97, 396, 98,
- 99, 311, 400, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 401, 312, 313, 404, 314, 315, 316,
- 405, 411, 409, 384, -89, 317, 318, 413, 415, 319,
- 308, 320, -101, 111, 321, 89, 291, 322, 336, 277,
- 370, 306, 323, 270, 364, 362, 58, 344, 324, 112,
- 363, 93, 59, 0, -85, 0, 113, 114, 0, 0,
- 94, 95, 96, 0, 0, 0, 0, 97, 2, 98,
- 99, 311, 0, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 0, 312, 313, 0, 314, 315, 316,
- 0, 0, 0, 0, -89, 317, 318, 0, 0, 319,
- 0, 320, 0, 111, 321, 0, 0, 322, 0, 0,
- 0, 3, 323, 0, 0, 0, 0, 0, 324, 112,
- 0, 0, 4, 0, 0, 5, 113, 114, 0, 6,
- 0, 7, 8, 0, 0, 0, 9, 10, 0, 11,
- 0, 12, 0, 0, 13, 14, 0, 15, 16, 17,
- 18, 0, 19, 20, 21, 22, 23, 93, 0, 24,
- 25, 0, 0, 0, 0, 0, 94, 95, 96, 0,
- 0, 0, 0, 97, 0, 98, 99, 100, 3, 101,
- 102, 103, 104, 105, 106, 107, 108, 109, 110, 4,
- 0, 0, 5, 0, 0, 0, 6, 0, 7, 8,
- 0, 0, 0, 9, 10, 0, 11, 0, 12, 111,
- 0, 13, 14, 0, 15, 16, 17, 18, 0, 19,
- 20, 21, 22, 23, 0, 112, 24, 25, 93, 0,
- 0, 0, 113, 114, 0, 0, 0, 94, 95, 96,
- 0, 0, 0, 0, 97, 0, 98, 99, 100, 0,
- 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
- 0, 0, 0, 93, 187, 188, 189, 190, 191, 192,
- 0, 0, 94, 95, 96, 0, 0, 0, 0, 97,
- 111, 98, 99, 100, 0, 101, 102, 103, 104, 105,
- 106, 107, 108, 109, 110, 0, 112, 0, 93, 0,
- 0, 138, 0, 113, 114, 0, 0, 94, 95, 96,
- 0, 0, 0, 0, 97, 111, 98, 99, 100, 0,
- 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
- 0, 112, 0, 93, 0, 0, 339, 0, 113, 114,
- 0, 0, 94, 95, 96, 0, 0, 0, 0, 150,
- 111, 98, 99, 100, 0, 101, 102, 103, 104, 105,
- 106, 107, 108, 109, 110, 0, 112, 0, 93, 0,
- 0, 0, 0, 113, 114, 0, 0, 94, 95, 96,
- 0, 0, 0, 0, 152, 111, 98, 99, 100, 0,
- 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
- 0, 112, 0, 0, 0, 124, 0, 0, 113, 114,
- 0, 100, 3, 0, 0, 0, 0, 0, 0, 0,
- 111, 0, 0, 4, 0, 0, 5, 0, 0, 0,
- 6, 0, 7, 8, 0, 0, 112, 9, 10, 0,
- 11, 0, 12, 113, 114, 13, 14, 0, 15, 16,
- 17, 18, 3, 19, 20, 21, 22, 23, 0, 0,
- 24, 25, 0, 4, 0, 0, 5, 0, 0, 0,
- 6, 0, 7, 8, 0, 0, 0, 9, 10, 0,
- 11, 0, 12, 0, 0, 13, 14, 0, 15, 16,
- 17, 18, 3, 19, 20, 21, 22, 23, 0, 0,
- 24, 25, 0, 4, 0, 129, 5, 0, 0, 0,
- 6, 0, 7, 8, 0, 0, 0, 9, 10, 0,
- 11, 0, 12, 0, 0, 13, 14, 0, 15, 16,
- 17, 18, 0, 19, 20, 21, 22, 23, 0, 4,
- 24, 25, 5, 0, 0, 0, 6, 0, 7, 8,
- 0, 0, 0, 9, 10, 0, 11, 0, 12, 0,
- 0, 13, 0, 0, 15, 16, 0, 18, 0, 19,
- 0, 21, 22, 23, 0, 0, 24, 25, 157, 158,
- 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
- 371, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 157,
- 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 0, 169, 170, 171, 172, 173, 174, 175, 176,
- 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
- 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
- 181, 182, 183, 184, 185, 186, 172, 173, 174, 175,
- 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
- 186, 173, 174, 175, 176, 177, 178, 179, 180, 181,
- 182, 183, 184, 185, 186, 174, 175, 176, 177, 178,
- 179, 180, 181, 182, 183, 184, 185, 186, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186
-};
-
-static const yytype_int16 yycheck[] =
-{
- 1, 27, 14, 91, 131, 17, 30, 58, 20, 33,
- 92, 320, 4, 200, 97, 147, 4, 4, 199, 33,
- 95, 47, 74, 95, 207, 53, 33, 17, 73, 53,
- 37, 38, 4, 61, 4, 95, 60, 61, 221, 42,
- 43, 67, 229, 67, 70, 17, 60, 92, 133, 77,
- 135, 95, 76, 77, 384, 93, 94, 95, 96, 4,
- 98, 99, 62, 3, 4, 395, 4, 150, 4, 152,
- 4, 42, 381, 63, 64, 113, 114, 33, 78, 206,
- 95, 37, 38, 93, 271, 168, 93, 132, 5, 316,
- 91, 5, 95, 138, 275, 5, 405, 97, 281, 231,
- 232, 127, 94, 94, 413, 188, 52, 294, 96, 96,
- 53, 137, 157, 158, 159, 160, 161, 162, 163, 164,
- 165, 166, 167, 93, 169, 170, 171, 172, 173, 174,
- 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
- 185, 186, 224, 226, 189, 197, 36, 198, 93, 93,
- 150, 36, 152, 280, 199, 93, 4, 93, 95, 93,
- 387, 33, 207, 390, 391, 37, 38, 3, 33, 96,
- 42, 43, 37, 38, 200, 187, 221, 228, 190, 224,
- 33, 356, 409, 358, 37, 38, 361, 22, 415, 42,
- 43, 42, 43, 37, 38, 283, 31, 32, 33, 42,
- 43, 36, 37, 38, 4, 40, 41, 42, 220, 44,
- 45, 46, 47, 48, 49, 50, 51, 52, 53, 37,
- 38, 93, 87, 88, 276, 17, 91, 87, 88, 4,
- 275, 91, 33, 5, 87, 88, 281, 38, 91, 74,
- 4, 42, 43, 93, 95, 271, 93, 271, 30, 31,
- 3, 33, 94, 298, 93, 90, 301, 295, 4, 285,
- 95, 30, 97, 98, 33, 3, 4, 3, 313, 94,
- 294, 53, 355, 3, 4, 5, 359, 360, 60, 61,
- 3, 93, 283, 93, 53, 94, 87, 88, 3, 4,
- 91, 60, 61, 381, 339, 77, 17, 33, 33, 34,
- 35, 3, 38, 386, 93, 317, 42, 43, 77, 335,
- 33, 356, 33, 358, 396, 38, 361, 38, 17, 42,
- 43, 42, 43, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- 33, 3, 1, 33, 3, 38, 3, 37, 38, 42,
- 43, 396, 42, 43, 111, 112, 17, 38, 384, 38,
- 38, 38, 38, 22, 31, 32, 33, 34, 35, 395,
- 38, 65, 31, 32, 33, 3, 83, 4, 3, 38,
- 381, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
- 59, 60, 61, 3, 63, 64, 65, 66, 67, 68,
- 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
- 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 38, 5, 95, 96, 97, 98,
- 1, 16, 3, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
- 35, 22, 29, 30, 31, 32, 33, 34, 35, 38,
- 31, 32, 33, 96, 93, 93, 93, 38, 5, 40,
- 41, 42, 3, 44, 45, 46, 47, 48, 49, 50,
- 51, 52, 53, 3, 55, 56, 3, 58, 59, 60,
- 3, 3, 62, 4, 65, 66, 67, 3, 93, 70,
- 280, 72, 3, 74, 75, 70, 224, 78, 285, 206,
- 339, 276, 83, 197, 331, 327, 31, 301, 89, 90,
- 327, 22, 31, -1, 95, -1, 97, 98, -1, -1,
- 31, 32, 33, -1, -1, -1, -1, 38, 0, 40,
- 41, 42, -1, 44, 45, 46, 47, 48, 49, 50,
- 51, 52, 53, -1, 55, 56, -1, 58, 59, 60,
- -1, -1, -1, -1, 65, 66, 67, -1, -1, 70,
- -1, 72, -1, 74, 75, -1, -1, 78, -1, -1,
- -1, 43, 83, -1, -1, -1, -1, -1, 89, 90,
- -1, -1, 54, -1, -1, 57, 97, 98, -1, 61,
- -1, 63, 64, -1, -1, -1, 68, 69, -1, 71,
- -1, 73, -1, -1, 76, 77, -1, 79, 80, 81,
- 82, -1, 84, 85, 86, 87, 88, 22, -1, 91,
- 92, -1, -1, -1, -1, -1, 31, 32, 33, -1,
- -1, -1, -1, 38, -1, 40, 41, 42, 43, 44,
- 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
- -1, -1, 57, -1, -1, -1, 61, -1, 63, 64,
- -1, -1, -1, 68, 69, -1, 71, -1, 73, 74,
- -1, 76, 77, -1, 79, 80, 81, 82, -1, 84,
- 85, 86, 87, 88, -1, 90, 91, 92, 22, -1,
- -1, -1, 97, 98, -1, -1, -1, 31, 32, 33,
- -1, -1, -1, -1, 38, -1, 40, 41, 42, -1,
- 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- -1, -1, -1, 22, 36, 37, 38, 39, 40, 41,
- -1, -1, 31, 32, 33, -1, -1, -1, -1, 38,
- 74, 40, 41, 42, -1, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, -1, 90, -1, 22, -1,
- -1, 95, -1, 97, 98, -1, -1, 31, 32, 33,
- -1, -1, -1, -1, 38, 74, 40, 41, 42, -1,
- 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- -1, 90, -1, 22, -1, -1, 95, -1, 97, 98,
- -1, -1, 31, 32, 33, -1, -1, -1, -1, 38,
- 74, 40, 41, 42, -1, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, -1, 90, -1, 22, -1,
- -1, -1, -1, 97, 98, -1, -1, 31, 32, 33,
- -1, -1, -1, -1, 38, 74, 40, 41, 42, -1,
- 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- -1, 90, -1, -1, -1, 36, -1, -1, 97, 98,
- -1, 42, 43, -1, -1, -1, -1, -1, -1, -1,
- 74, -1, -1, 54, -1, -1, 57, -1, -1, -1,
- 61, -1, 63, 64, -1, -1, 90, 68, 69, -1,
- 71, -1, 73, 97, 98, 76, 77, -1, 79, 80,
- 81, 82, 43, 84, 85, 86, 87, 88, -1, -1,
- 91, 92, -1, 54, -1, -1, 57, -1, -1, -1,
- 61, -1, 63, 64, -1, -1, -1, 68, 69, -1,
- 71, -1, 73, -1, -1, 76, 77, -1, 79, 80,
- 81, 82, 43, 84, 85, 86, 87, 88, -1, -1,
- 91, 92, -1, 54, -1, 96, 57, -1, -1, -1,
- 61, -1, 63, 64, -1, -1, -1, 68, 69, -1,
- 71, -1, 73, -1, -1, 76, 77, -1, 79, 80,
- 81, 82, -1, 84, 85, 86, 87, 88, -1, 54,
- 91, 92, 57, -1, -1, -1, 61, -1, 63, 64,
- -1, -1, -1, 68, 69, -1, 71, -1, 73, -1,
- -1, 76, -1, -1, 79, 80, -1, 82, -1, 84,
- -1, 86, 87, 88, -1, -1, 91, 92, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
- 27, 28, 29, 30, 31, 32, 33, 34, 35, 5,
- 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, -1, 18, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
- 30, 31, 32, 33, 34, 35, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
- 35, 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32, 33, 34, 35, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35, 25, 26,
- 27, 28, 29, 30, 31, 32, 33, 34, 35
-};
-
-/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
- symbol of state STATE-NUM. */
-static const yytype_uint8 yystos[] =
-{
- 0, 100, 0, 43, 54, 57, 61, 63, 64, 68,
- 69, 71, 73, 76, 77, 79, 80, 81, 82, 84,
- 85, 86, 87, 88, 91, 92, 101, 152, 153, 155,
- 156, 165, 166, 168, 169, 170, 42, 43, 95, 150,
- 173, 150, 173, 95, 173, 3, 33, 38, 104, 106,
- 107, 172, 173, 162, 164, 168, 169, 170, 156, 166,
- 168, 162, 151, 157, 158, 161, 159, 163, 106, 3,
- 4, 102, 105, 37, 38, 164, 163, 162, 113, 153,
- 154, 150, 150, 42, 167, 95, 106, 170, 93, 104,
- 106, 111, 5, 22, 31, 32, 33, 38, 40, 41,
- 42, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- 53, 74, 90, 97, 98, 139, 140, 142, 143, 144,
- 145, 146, 147, 171, 36, 127, 128, 154, 171, 96,
- 154, 114, 5, 4, 96, 160, 103, 155, 95, 123,
- 142, 143, 143, 143, 143, 141, 142, 154, 143, 143,
- 38, 144, 38, 144, 143, 143, 94, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 18,
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
- 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
- 39, 40, 41, 52, 53, 36, 93, 4, 33, 37,
- 38, 106, 119, 120, 121, 122, 115, 17, 106, 116,
- 117, 118, 172, 142, 167, 167, 95, 129, 106, 112,
- 36, 37, 123, 124, 125, 126, 4, 93, 33, 38,
- 119, 154, 154, 142, 142, 142, 142, 142, 142, 142,
- 142, 142, 142, 142, 141, 142, 142, 142, 142, 142,
- 142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
- 142, 142, 142, 173, 141, 142, 148, 149, 173, 36,
- 128, 163, 139, 93, 120, 37, 38, 116, 140, 3,
- 4, 17, 96, 130, 3, 4, 173, 140, 4, 5,
- 123, 124, 96, 141, 163, 93, 119, 119, 17, 94,
- 93, 4, 120, 94, 93, 139, 127, 3, 117, 140,
- 1, 42, 55, 56, 58, 59, 60, 66, 67, 70,
- 72, 75, 78, 83, 89, 96, 108, 131, 132, 133,
- 135, 136, 137, 138, 141, 155, 112, 94, 4, 95,
- 143, 93, 93, 142, 149, 94, 93, 3, 17, 3,
- 142, 3, 17, 133, 173, 38, 38, 138, 38, 38,
- 38, 38, 132, 135, 129, 65, 3, 3, 106, 109,
- 126, 17, 83, 3, 141, 148, 3, 148, 141, 141,
- 148, 38, 110, 3, 4, 96, 38, 93, 93, 93,
- 93, 93, 93, 134, 138, 155, 5, 109, 141, 133,
- 3, 3, 133, 133, 3, 3, 109, 123, 93, 62,
- 138, 3, 133, 3, 138, 93, 133
-};
-
-#define yyerrok (yyerrstatus = 0)
-#define yyclearin (yychar = YYEMPTY)
-#define YYEMPTY (-2)
-#define YYEOF 0
-
-#define YYACCEPT goto yyacceptlab
-#define YYABORT goto yyabortlab
-#define YYERROR goto yyerrorlab
-
-
-/* Like YYERROR except do call yyerror. This remains here temporarily
- to ease the transition to the new meaning of YYERROR, for GCC.
- Once GCC version 2 has supplanted version 1, this can go. */
-
-#define YYFAIL goto yyerrlab
-
-#define YYRECOVERING() (!!yyerrstatus)
-
-#define YYBACKUP(Token, Value) \
-do \
- if (yychar == YYEMPTY && yylen == 1) \
- { \
- yychar = (Token); \
- yylval = (Value); \
- yytoken = YYTRANSLATE (yychar); \
- YYPOPSTACK (1); \
- goto yybackup; \
- } \
- else \
- { \
- yyerror (YY_("syntax error: cannot back up")); \
- YYERROR; \
- } \
-while (YYID (0))
-
-
-#define YYTERROR 1
-#define YYERRCODE 256
-
-
-/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
- If N is 0, then set CURRENT to the empty location which ends
- the previous symbol: RHS[0] (always defined). */
-
-#define YYRHSLOC(Rhs, K) ((Rhs)[K])
-#ifndef YYLLOC_DEFAULT
-# define YYLLOC_DEFAULT(Current, Rhs, N) \
- do \
- if (YYID (N)) \
- { \
- (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
- (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
- (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
- (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
- } \
- else \
- { \
- (Current).first_line = (Current).last_line = \
- YYRHSLOC (Rhs, 0).last_line; \
- (Current).first_column = (Current).last_column = \
- YYRHSLOC (Rhs, 0).last_column; \
- } \
- while (YYID (0))
-#endif
-
-
-/* YY_LOCATION_PRINT -- Print the location on the stream.
- This macro was not mandated originally: define only if we know
- we won't break user code: when these are the locations we know. */
-
-#ifndef YY_LOCATION_PRINT
-# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
-# define YY_LOCATION_PRINT(File, Loc) \
- fprintf (File, "%d.%d-%d.%d", \
- (Loc).first_line, (Loc).first_column, \
- (Loc).last_line, (Loc).last_column)
-# else
-# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-# endif
-#endif
-
-
-/* YYLEX -- calling `yylex' with the right arguments. */
-
-#ifdef YYLEX_PARAM
-# define YYLEX yylex (YYLEX_PARAM)
-#else
-# define YYLEX yylex ()
-#endif
-
-/* Enable debugging if requested. */
-#if YYDEBUG
-
-# ifndef YYFPRINTF
-# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
-# define YYFPRINTF fprintf
-# endif
-
-# define YYDPRINTF(Args) \
-do { \
- if (yydebug) \
- YYFPRINTF Args; \
-} while (YYID (0))
-
-# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
-do { \
- if (yydebug) \
- { \
- YYFPRINTF (stderr, "%s ", Title); \
- yy_symbol_print (stderr, \
- Type, Value); \
- YYFPRINTF (stderr, "\n"); \
- } \
-} while (YYID (0))
-
-
-/*--------------------------------.
-| Print this symbol on YYOUTPUT. |
-`--------------------------------*/
-
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_value_print (yyoutput, yytype, yyvaluep)
- FILE *yyoutput;
- int yytype;
- YYSTYPE const * const yyvaluep;
-#endif
-{
- if (!yyvaluep)
- return;
-# ifdef YYPRINT
- if (yytype < YYNTOKENS)
- YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
-# else
- YYUSE (yyoutput);
-# endif
- switch (yytype)
- {
- default:
- break;
- }
-}
-
-
-/*--------------------------------.
-| Print this symbol on YYOUTPUT. |
-`--------------------------------*/
-
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_print (yyoutput, yytype, yyvaluep)
- FILE *yyoutput;
- int yytype;
- YYSTYPE const * const yyvaluep;
-#endif
-{
- if (yytype < YYNTOKENS)
- YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
- else
- YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
-
- yy_symbol_value_print (yyoutput, yytype, yyvaluep);
- YYFPRINTF (yyoutput, ")");
-}
-
-/*------------------------------------------------------------------.
-| yy_stack_print -- Print the state stack from its BOTTOM up to its |
-| TOP (included). |
-`------------------------------------------------------------------*/
-
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_stack_print (yytype_int16 *bottom, yytype_int16 *top)
-#else
-static void
-yy_stack_print (bottom, top)
- yytype_int16 *bottom;
- yytype_int16 *top;
-#endif
-{
- YYFPRINTF (stderr, "Stack now");
- for (; bottom <= top; ++bottom)
- YYFPRINTF (stderr, " %d", *bottom);
- YYFPRINTF (stderr, "\n");
-}
-
-# define YY_STACK_PRINT(Bottom, Top) \
-do { \
- if (yydebug) \
- yy_stack_print ((Bottom), (Top)); \
-} while (YYID (0))
-
-
-/*------------------------------------------------.
-| Report that the YYRULE is going to be reduced. |
-`------------------------------------------------*/
-
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
-#else
-static void
-yy_reduce_print (yyvsp, yyrule)
- YYSTYPE *yyvsp;
- int yyrule;
-#endif
-{
- int yynrhs = yyr2[yyrule];
- int yyi;
- unsigned long int yylno = yyrline[yyrule];
- YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
- yyrule - 1, yylno);
- /* The symbols being reduced. */
- for (yyi = 0; yyi < yynrhs; yyi++)
- {
- fprintf (stderr, " $%d = ", yyi + 1);
- yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
- &(yyvsp[(yyi + 1) - (yynrhs)])
- );
- fprintf (stderr, "\n");
- }
-}
-
-# define YY_REDUCE_PRINT(Rule) \
-do { \
- if (yydebug) \
- yy_reduce_print (yyvsp, Rule); \
-} while (YYID (0))
-
-/* Nonzero means print parse trace. It is left uninitialized so that
- multiple parsers can coexist. */
-int yydebug;
-#else /* !YYDEBUG */
-# define YYDPRINTF(Args)
-# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
-# define YY_STACK_PRINT(Bottom, Top)
-# define YY_REDUCE_PRINT(Rule)
-#endif /* !YYDEBUG */
-
-
-/* YYINITDEPTH -- initial size of the parser's stacks. */
-#ifndef YYINITDEPTH
-# define YYINITDEPTH 200
-#endif
-
-/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
- if the built-in stack extension method is used).
-
- Do not make this value too large; the results are undefined if
- YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
- evaluated with infinite-precision integer arithmetic. */
-
-#ifndef YYMAXDEPTH
-# define YYMAXDEPTH 10000
-#endif
-
-
-
-#if YYERROR_VERBOSE
-
-# ifndef yystrlen
-# if defined __GLIBC__ && defined _STRING_H
-# define yystrlen strlen
-# else
-/* Return the length of YYSTR. */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static YYSIZE_T
-yystrlen (const char *yystr)
-#else
-static YYSIZE_T
-yystrlen (yystr)
- const char *yystr;
-#endif
-{
- YYSIZE_T yylen;
- for (yylen = 0; yystr[yylen]; yylen++)
- continue;
- return yylen;
-}
-# endif
-# endif
-
-# ifndef yystpcpy
-# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
-# define yystpcpy stpcpy
-# else
-/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
- YYDEST. */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static char *
-yystpcpy (char *yydest, const char *yysrc)
-#else
-static char *
-yystpcpy (yydest, yysrc)
- char *yydest;
- const char *yysrc;
-#endif
-{
- char *yyd = yydest;
- const char *yys = yysrc;
-
- while ((*yyd++ = *yys++) != '\0')
- continue;
-
- return yyd - 1;
-}
-# endif
-# endif
-
-# ifndef yytnamerr
-/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
- quotes and backslashes, so that it's suitable for yyerror. The
- heuristic is that double-quoting is unnecessary unless the string
- contains an apostrophe, a comma, or backslash (other than
- backslash-backslash). YYSTR is taken from yytname. If YYRES is
- null, do not copy; instead, return the length of what the result
- would have been. */
-static YYSIZE_T
-yytnamerr (char *yyres, const char *yystr)
-{
- if (*yystr == '"')
- {
- YYSIZE_T yyn = 0;
- char const *yyp = yystr;
-
- for (;;)
- switch (*++yyp)
- {
- case '\'':
- case ',':
- goto do_not_strip_quotes;
-
- case '\\':
- if (*++yyp != '\\')
- goto do_not_strip_quotes;
- /* Fall through. */
- default:
- if (yyres)
- yyres[yyn] = *yyp;
- yyn++;
- break;
-
- case '"':
- if (yyres)
- yyres[yyn] = '\0';
- return yyn;
- }
- do_not_strip_quotes: ;
- }
-
- if (! yyres)
- return yystrlen (yystr);
-
- return yystpcpy (yyres, yystr) - yyres;
-}
-# endif
-
-/* Copy into YYRESULT an error message about the unexpected token
- YYCHAR while in state YYSTATE. Return the number of bytes copied,
- including the terminating null byte. If YYRESULT is null, do not
- copy anything; just return the number of bytes that would be
- copied. As a special case, return 0 if an ordinary "syntax error"
- message will do. Return YYSIZE_MAXIMUM if overflow occurs during
- size calculation. */
-static YYSIZE_T
-yysyntax_error (char *yyresult, int yystate, int yychar)
-{
- int yyn = yypact[yystate];
-
- if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
- return 0;
- else
- {
- int yytype = YYTRANSLATE (yychar);
- YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
- YYSIZE_T yysize = yysize0;
- YYSIZE_T yysize1;
- int yysize_overflow = 0;
- enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
- char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
- int yyx;
-
-# if 0
- /* This is so xgettext sees the translatable formats that are
- constructed on the fly. */
- YY_("syntax error, unexpected %s");
- YY_("syntax error, unexpected %s, expecting %s");
- YY_("syntax error, unexpected %s, expecting %s or %s");
- YY_("syntax error, unexpected %s, expecting %s or %s or %s");
- YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
-# endif
- char *yyfmt;
- char const *yyf;
- static char const yyunexpected[] = "syntax error, unexpected %s";
- static char const yyexpecting[] = ", expecting %s";
- static char const yyor[] = " or %s";
- char yyformat[sizeof yyunexpected
- + sizeof yyexpecting - 1
- + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
- * (sizeof yyor - 1))];
- char const *yyprefix = yyexpecting;
-
- /* Start YYX at -YYN if negative to avoid negative indexes in
- YYCHECK. */
- int yyxbegin = yyn < 0 ? -yyn : 0;
-
- /* Stay within bounds of both yycheck and yytname. */
- int yychecklim = YYLAST - yyn + 1;
- int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
- int yycount = 1;
-
- yyarg[0] = yytname[yytype];
- yyfmt = yystpcpy (yyformat, yyunexpected);
-
- for (yyx = yyxbegin; yyx < yyxend; ++yyx)
- if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
- {
- if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
- {
- yycount = 1;
- yysize = yysize0;
- yyformat[sizeof yyunexpected - 1] = '\0';
- break;
- }
- yyarg[yycount++] = yytname[yyx];
- yysize1 = yysize + yytnamerr (0, yytname[yyx]);
- yysize_overflow |= (yysize1 < yysize);
- yysize = yysize1;
- yyfmt = yystpcpy (yyfmt, yyprefix);
- yyprefix = yyor;
- }
-
- yyf = YY_(yyformat);
- yysize1 = yysize + yystrlen (yyf);
- yysize_overflow |= (yysize1 < yysize);
- yysize = yysize1;
-
- if (yysize_overflow)
- return YYSIZE_MAXIMUM;
-
- if (yyresult)
- {
- /* Avoid sprintf, as that infringes on the user's name space.
- Don't have undefined behavior even if the translation
- produced a string with the wrong number of "%s"s. */
- char *yyp = yyresult;
- int yyi = 0;
- while ((*yyp = *yyf) != '\0')
- {
- if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
- {
- yyp += yytnamerr (yyp, yyarg[yyi++]);
- yyf += 2;
- }
- else
- {
- yyp++;
- yyf++;
- }
- }
- }
- return yysize;
- }
-}
-#endif /* YYERROR_VERBOSE */
-
-
-/*-----------------------------------------------.
-| Release the memory associated to this symbol. |
-`-----------------------------------------------*/
-
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
-#else
-static void
-yydestruct (yymsg, yytype, yyvaluep)
- const char *yymsg;
- int yytype;
- YYSTYPE *yyvaluep;
-#endif
-{
- YYUSE (yyvaluep);
-
- if (!yymsg)
- yymsg = "Deleting";
- YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
-
- switch (yytype)
- {
-
- default:
- break;
- }
-}
-
-
-/* Prevent warnings from -Wmissing-prototypes. */
-
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
-#endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
-
-
-
-/* The look-ahead symbol. */
-int yychar;
-
-/* The semantic value of the look-ahead symbol. */
-YYSTYPE yylval;
-
-/* Number of syntax errors so far. */
-int yynerrs;
-
-
-
-/*----------.
-| yyparse. |
-`----------*/
-
-#ifdef YYPARSE_PARAM
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-int
-yyparse (void *YYPARSE_PARAM)
-#else
-int
-yyparse (YYPARSE_PARAM)
- void *YYPARSE_PARAM;
-#endif
-#else /* ! YYPARSE_PARAM */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-int
-yyparse (void)
-#else
-int
-yyparse ()
-
-#endif
-#endif
-{
-
- int yystate;
- int yyn;
- int yyresult;
- /* Number of tokens to shift before error messages enabled. */
- int yyerrstatus;
- /* Look-ahead token as an internal (translated) token number. */
- int yytoken = 0;
-#if YYERROR_VERBOSE
- /* Buffer for error messages, and its allocated size. */
- char yymsgbuf[128];
- char *yymsg = yymsgbuf;
- YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
-#endif
-
- /* Three stacks and their tools:
- `yyss': related to states,
- `yyvs': related to semantic values,
- `yyls': related to locations.
-
- Refer to the stacks thru separate pointers, to allow yyoverflow
- to reallocate them elsewhere. */
-
- /* The state stack. */
- yytype_int16 yyssa[YYINITDEPTH];
- yytype_int16 *yyss = yyssa;
- yytype_int16 *yyssp;
-
- /* The semantic value stack. */
- YYSTYPE yyvsa[YYINITDEPTH];
- YYSTYPE *yyvs = yyvsa;
- YYSTYPE *yyvsp;
-
-
-
-#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
-
- YYSIZE_T yystacksize = YYINITDEPTH;
-
- /* The variables used to return semantic value and location from the
- action routines. */
- YYSTYPE yyval;
-
-
- /* The number of symbols on the RHS of the reduced rule.
- Keep to zero when no symbol should be popped. */
- int yylen = 0;
-
- YYDPRINTF ((stderr, "Starting parse\n"));
-
- yystate = 0;
- yyerrstatus = 0;
- yynerrs = 0;
- yychar = YYEMPTY; /* Cause a token to be read. */
-
- /* Initialize stack pointers.
- Waste one element of value and location stack
- so that they stay on the same level as the state stack.
- The wasted elements are never initialized. */
-
- yyssp = yyss;
- yyvsp = yyvs;
-
- goto yysetstate;
-
-/*------------------------------------------------------------.
-| yynewstate -- Push a new state, which is found in yystate. |
-`------------------------------------------------------------*/
- yynewstate:
- /* In all cases, when you get here, the value and location stacks
- have just been pushed. So pushing a state here evens the stacks. */
- yyssp++;
-
- yysetstate:
- *yyssp = yystate;
-
- if (yyss + yystacksize - 1 <= yyssp)
- {
- /* Get the current used size of the three stacks, in elements. */
- YYSIZE_T yysize = yyssp - yyss + 1;
-
-#ifdef yyoverflow
- {
- /* Give user a chance to reallocate the stack. Use copies of
- these so that the &'s don't force the real ones into
- memory. */
- YYSTYPE *yyvs1 = yyvs;
- yytype_int16 *yyss1 = yyss;
-
-
- /* Each stack pointer address is followed by the size of the
- data in use in that stack, in bytes. This used to be a
- conditional around just the two extra args, but that might
- be undefined if yyoverflow is a macro. */
- yyoverflow (YY_("memory exhausted"),
- &yyss1, yysize * sizeof (*yyssp),
- &yyvs1, yysize * sizeof (*yyvsp),
-
- &yystacksize);
-
- yyss = yyss1;
- yyvs = yyvs1;
- }
-#else /* no yyoverflow */
-# ifndef YYSTACK_RELOCATE
- goto yyexhaustedlab;
-# else
- /* Extend the stack our own way. */
- if (YYMAXDEPTH <= yystacksize)
- goto yyexhaustedlab;
- yystacksize *= 2;
- if (YYMAXDEPTH < yystacksize)
- yystacksize = YYMAXDEPTH;
-
- {
- yytype_int16 *yyss1 = yyss;
- union yyalloc *yyptr =
- (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
- if (! yyptr)
- goto yyexhaustedlab;
- YYSTACK_RELOCATE (yyss);
- YYSTACK_RELOCATE (yyvs);
-
-# undef YYSTACK_RELOCATE
- if (yyss1 != yyssa)
- YYSTACK_FREE (yyss1);
- }
-# endif
-#endif /* no yyoverflow */
-
- yyssp = yyss + yysize - 1;
- yyvsp = yyvs + yysize - 1;
-
-
- YYDPRINTF ((stderr, "Stack size increased to %lu\n",
- (unsigned long int) yystacksize));
-
- if (yyss + yystacksize - 1 <= yyssp)
- YYABORT;
- }
-
- YYDPRINTF ((stderr, "Entering state %d\n", yystate));
-
- goto yybackup;
-
-/*-----------.
-| yybackup. |
-`-----------*/
-yybackup:
-
- /* Do appropriate processing given the current state. Read a
- look-ahead token if we need one and don't already have one. */
-
- /* First try to decide what to do without reference to look-ahead token. */
- yyn = yypact[yystate];
- if (yyn == YYPACT_NINF)
- goto yydefault;
-
- /* Not known => get a look-ahead token if don't already have one. */
-
- /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */
- if (yychar == YYEMPTY)
- {
- YYDPRINTF ((stderr, "Reading a token: "));
- yychar = YYLEX;
- }
-
- if (yychar <= YYEOF)
- {
- yychar = yytoken = YYEOF;
- YYDPRINTF ((stderr, "Now at end of input.\n"));
- }
- else
- {
- yytoken = YYTRANSLATE (yychar);
- YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
- }
-
- /* If the proper action on seeing token YYTOKEN is to reduce or to
- detect an error, take that action. */
- yyn += yytoken;
- if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
- goto yydefault;
- yyn = yytable[yyn];
- if (yyn <= 0)
- {
- if (yyn == 0 || yyn == YYTABLE_NINF)
- goto yyerrlab;
- yyn = -yyn;
- goto yyreduce;
- }
-
- if (yyn == YYFINAL)
- YYACCEPT;
-
- /* Count tokens shifted since error; after three, turn off error
- status. */
- if (yyerrstatus)
- yyerrstatus--;
-
- /* Shift the look-ahead token. */
- YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
-
- /* Discard the shifted token unless it is eof. */
- if (yychar != YYEOF)
- yychar = YYEMPTY;
-
- yystate = yyn;
- *++yyvsp = yylval;
-
- goto yynewstate;
-
-
-/*-----------------------------------------------------------.
-| yydefault -- do the default action for the current state. |
-`-----------------------------------------------------------*/
-yydefault:
- yyn = yydefact[yystate];
- if (yyn == 0)
- goto yyerrlab;
- goto yyreduce;
-
-
-/*-----------------------------.
-| yyreduce -- Do a reduction. |
-`-----------------------------*/
-yyreduce:
- /* yyn is the number of a rule to reduce with. */
- yylen = yyr2[yyn];
-
- /* If YYLEN is nonzero, implement the default value of the action:
- `$$ = $1'.
-
- Otherwise, the following line sets YYVAL to garbage.
- This behavior is undocumented and Bison
- users should not rely upon it. Assigning to YYVAL
- unconditionally makes the parser a bit smaller, and it avoids a
- GCC warning that YYVAL may be used uninitialized. */
- yyval = yyvsp[1-yylen];
-
-
- YY_REDUCE_PRINT (yyn);
- switch (yyn)
- {
- case 4:
-#line 109 "cc.y"
- {
- dodecl(xdecl, lastclass, lasttype, Z);
- }
- break;
-
- case 6:
-#line 114 "cc.y"
- {
- lastdcl = T;
- firstarg = S;
- dodecl(xdecl, lastclass, lasttype, (yyvsp[(2) - (2)].node));
- if(lastdcl == T || lastdcl->etype != TFUNC) {
- diag((yyvsp[(2) - (2)].node), "not a function");
- lastdcl = types[TFUNC];
- }
- thisfn = lastdcl;
- markdcl();
- firstdcl = dclstack;
- argmark((yyvsp[(2) - (2)].node), 0);
- }
- break;
-
- case 7:
-#line 128 "cc.y"
- {
- argmark((yyvsp[(2) - (4)].node), 1);
- }
- break;
-
- case 8:
-#line 132 "cc.y"
- {
- Node *n;
-
- n = revertdcl();
- if(n)
- (yyvsp[(6) - (6)].node) = new(OLIST, n, (yyvsp[(6) - (6)].node));
- if(!debug['a'] && !debug['Z'])
- codgen((yyvsp[(6) - (6)].node), (yyvsp[(2) - (6)].node));
- }
- break;
-
- case 9:
-#line 144 "cc.y"
- {
- dodecl(xdecl, lastclass, lasttype, (yyvsp[(1) - (1)].node));
- }
- break;
-
- case 10:
-#line 148 "cc.y"
- {
- (yyvsp[(1) - (1)].node) = dodecl(xdecl, lastclass, lasttype, (yyvsp[(1) - (1)].node));
- }
- break;
-
- case 11:
-#line 152 "cc.y"
- {
- doinit((yyvsp[(1) - (4)].node)->sym, (yyvsp[(1) - (4)].node)->type, 0L, (yyvsp[(4) - (4)].node));
- }
- break;
-
- case 14:
-#line 160 "cc.y"
- {
- (yyval.node) = new(OIND, (yyvsp[(3) - (3)].node), Z);
- (yyval.node)->garb = simpleg((yyvsp[(2) - (3)].lval));
- }
- break;
-
- case 16:
-#line 168 "cc.y"
- {
- (yyval.node) = (yyvsp[(2) - (3)].node);
- }
- break;
-
- case 17:
-#line 172 "cc.y"
- {
- (yyval.node) = new(OFUNC, (yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].node));
- }
- break;
-
- case 18:
-#line 176 "cc.y"
- {
- (yyval.node) = new(OARRAY, (yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].node));
- }
- break;
-
- case 19:
-#line 185 "cc.y"
- {
- (yyval.node) = dodecl(adecl, lastclass, lasttype, Z);
- }
- break;
-
- case 20:
-#line 189 "cc.y"
- {
- (yyval.node) = (yyvsp[(2) - (3)].node);
- }
- break;
-
- case 21:
-#line 195 "cc.y"
- {
- dodecl(adecl, lastclass, lasttype, (yyvsp[(1) - (1)].node));
- (yyval.node) = Z;
- }
- break;
-
- case 22:
-#line 200 "cc.y"
- {
- (yyvsp[(1) - (1)].node) = dodecl(adecl, lastclass, lasttype, (yyvsp[(1) - (1)].node));
- }
- break;
-
- case 23:
-#line 204 "cc.y"
- {
- int32 w;
-
- w = (yyvsp[(1) - (4)].node)->sym->type->width;
- (yyval.node) = doinit((yyvsp[(1) - (4)].node)->sym, (yyvsp[(1) - (4)].node)->type, 0L, (yyvsp[(4) - (4)].node));
- (yyval.node) = contig((yyvsp[(1) - (4)].node)->sym, (yyval.node), w);
- }
- break;
-
- case 24:
-#line 212 "cc.y"
- {
- (yyval.node) = (yyvsp[(1) - (3)].node);
- if((yyvsp[(3) - (3)].node) != Z) {
- (yyval.node) = (yyvsp[(3) - (3)].node);
- if((yyvsp[(1) - (3)].node) != Z)
- (yyval.node) = new(OLIST, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- }
- break;
-
- case 27:
-#line 229 "cc.y"
- {
- dodecl(pdecl, lastclass, lasttype, (yyvsp[(1) - (1)].node));
- }
- break;
-
- case 29:
-#line 239 "cc.y"
- {
- lasttype = (yyvsp[(1) - (1)].type);
- }
- break;
-
- case 31:
-#line 244 "cc.y"
- {
- lasttype = (yyvsp[(2) - (2)].type);
- }
- break;
-
- case 33:
-#line 250 "cc.y"
- {
- lastfield = 0;
- edecl(CXXX, lasttype, S);
- }
- break;
-
- case 35:
-#line 258 "cc.y"
- {
- dodecl(edecl, CXXX, lasttype, (yyvsp[(1) - (1)].node));
- }
- break;
-
- case 37:
-#line 265 "cc.y"
- {
- lastbit = 0;
- firstbit = 1;
- }
- break;
-
- case 38:
-#line 270 "cc.y"
- {
- (yyval.node) = new(OBIT, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 39:
-#line 274 "cc.y"
- {
- (yyval.node) = new(OBIT, Z, (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 40:
-#line 282 "cc.y"
- {
- (yyval.node) = (Z);
- }
- break;
-
- case 42:
-#line 289 "cc.y"
- {
- (yyval.node) = new(OIND, (Z), Z);
- (yyval.node)->garb = simpleg((yyvsp[(2) - (2)].lval));
- }
- break;
-
- case 43:
-#line 294 "cc.y"
- {
- (yyval.node) = new(OIND, (yyvsp[(3) - (3)].node), Z);
- (yyval.node)->garb = simpleg((yyvsp[(2) - (3)].lval));
- }
- break;
-
- case 46:
-#line 303 "cc.y"
- {
- (yyval.node) = new(OFUNC, (yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].node));
- }
- break;
-
- case 47:
-#line 307 "cc.y"
- {
- (yyval.node) = new(OARRAY, (yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].node));
- }
- break;
-
- case 48:
-#line 313 "cc.y"
- {
- (yyval.node) = new(OFUNC, (Z), Z);
- }
- break;
-
- case 49:
-#line 317 "cc.y"
- {
- (yyval.node) = new(OARRAY, (Z), (yyvsp[(2) - (3)].node));
- }
- break;
-
- case 50:
-#line 321 "cc.y"
- {
- (yyval.node) = (yyvsp[(2) - (3)].node);
- }
- break;
-
- case 52:
-#line 328 "cc.y"
- {
- (yyval.node) = new(OINIT, invert((yyvsp[(2) - (3)].node)), Z);
- }
- break;
-
- case 53:
-#line 334 "cc.y"
- {
- (yyval.node) = new(OARRAY, (yyvsp[(2) - (3)].node), Z);
- }
- break;
-
- case 54:
-#line 338 "cc.y"
- {
- (yyval.node) = new(OELEM, Z, Z);
- (yyval.node)->sym = (yyvsp[(2) - (2)].sym);
- }
- break;
-
- case 57:
-#line 347 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].node));
- }
- break;
-
- case 59:
-#line 352 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 62:
-#line 360 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 63:
-#line 365 "cc.y"
- {
- (yyval.node) = Z;
- }
- break;
-
- case 64:
-#line 369 "cc.y"
- {
- (yyval.node) = invert((yyvsp[(1) - (1)].node));
- }
- break;
-
- case 66:
-#line 377 "cc.y"
- {
- (yyval.node) = new(OPROTO, (yyvsp[(2) - (2)].node), Z);
- (yyval.node)->type = (yyvsp[(1) - (2)].type);
- }
- break;
-
- case 67:
-#line 382 "cc.y"
- {
- (yyval.node) = new(OPROTO, (yyvsp[(2) - (2)].node), Z);
- (yyval.node)->type = (yyvsp[(1) - (2)].type);
- }
- break;
-
- case 68:
-#line 387 "cc.y"
- {
- (yyval.node) = new(ODOTDOT, Z, Z);
- }
- break;
-
- case 69:
-#line 391 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 70:
-#line 397 "cc.y"
- {
- (yyval.node) = invert((yyvsp[(2) - (3)].node));
- // if($2 != Z)
- // $$ = new(OLIST, $2, $$);
- if((yyval.node) == Z)
- (yyval.node) = new(OLIST, Z, Z);
- }
- break;
-
- case 71:
-#line 406 "cc.y"
- {
- (yyval.node) = Z;
- }
- break;
-
- case 72:
-#line 410 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 73:
-#line 414 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 75:
-#line 421 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 76:
-#line 427 "cc.y"
- {
- (yyval.node) = new(OCASE, (yyvsp[(2) - (3)].node), Z);
- }
- break;
-
- case 77:
-#line 431 "cc.y"
- {
- (yyval.node) = new(OCASE, Z, Z);
- }
- break;
-
- case 78:
-#line 435 "cc.y"
- {
- (yyval.node) = new(OLABEL, dcllabel((yyvsp[(1) - (2)].sym), 1), Z);
- }
- break;
-
- case 79:
-#line 441 "cc.y"
- {
- (yyval.node) = Z;
- }
- break;
-
- case 81:
-#line 446 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
- }
- break;
-
- case 83:
-#line 453 "cc.y"
- {
- (yyval.node) = (yyvsp[(2) - (2)].node);
- }
- break;
-
- case 85:
-#line 459 "cc.y"
- {
- markdcl();
- }
- break;
-
- case 86:
-#line 463 "cc.y"
- {
- (yyval.node) = revertdcl();
- if((yyval.node))
- (yyval.node) = new(OLIST, (yyval.node), (yyvsp[(2) - (2)].node));
- else
- (yyval.node) = (yyvsp[(2) - (2)].node);
- }
- break;
-
- case 87:
-#line 471 "cc.y"
- {
- (yyval.node) = new(OIF, (yyvsp[(3) - (5)].node), new(OLIST, (yyvsp[(5) - (5)].node), Z));
- if((yyvsp[(5) - (5)].node) == Z)
- warn((yyvsp[(3) - (5)].node), "empty if body");
- }
- break;
-
- case 88:
-#line 477 "cc.y"
- {
- (yyval.node) = new(OIF, (yyvsp[(3) - (7)].node), new(OLIST, (yyvsp[(5) - (7)].node), (yyvsp[(7) - (7)].node)));
- if((yyvsp[(5) - (7)].node) == Z)
- warn((yyvsp[(3) - (7)].node), "empty if body");
- if((yyvsp[(7) - (7)].node) == Z)
- warn((yyvsp[(3) - (7)].node), "empty else body");
- }
- break;
-
- case 89:
-#line 484 "cc.y"
- { markdcl(); }
- break;
-
- case 90:
-#line 485 "cc.y"
- {
- (yyval.node) = revertdcl();
- if((yyval.node)){
- if((yyvsp[(4) - (10)].node))
- (yyvsp[(4) - (10)].node) = new(OLIST, (yyval.node), (yyvsp[(4) - (10)].node));
- else
- (yyvsp[(4) - (10)].node) = (yyval.node);
- }
- (yyval.node) = new(OFOR, new(OLIST, (yyvsp[(6) - (10)].node), new(OLIST, (yyvsp[(4) - (10)].node), (yyvsp[(8) - (10)].node))), (yyvsp[(10) - (10)].node));
- }
- break;
-
- case 91:
-#line 496 "cc.y"
- {
- (yyval.node) = new(OWHILE, (yyvsp[(3) - (5)].node), (yyvsp[(5) - (5)].node));
- }
- break;
-
- case 92:
-#line 500 "cc.y"
- {
- (yyval.node) = new(ODWHILE, (yyvsp[(5) - (7)].node), (yyvsp[(2) - (7)].node));
- }
- break;
-
- case 93:
-#line 504 "cc.y"
- {
- (yyval.node) = new(ORETURN, (yyvsp[(2) - (3)].node), Z);
- (yyval.node)->type = thisfn->link;
- }
- break;
-
- case 94:
-#line 509 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->vconst = 0;
- (yyval.node)->type = types[TINT];
- (yyvsp[(3) - (5)].node) = new(OSUB, (yyval.node), (yyvsp[(3) - (5)].node));
-
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->vconst = 0;
- (yyval.node)->type = types[TINT];
- (yyvsp[(3) - (5)].node) = new(OSUB, (yyval.node), (yyvsp[(3) - (5)].node));
-
- (yyval.node) = new(OSWITCH, (yyvsp[(3) - (5)].node), (yyvsp[(5) - (5)].node));
- }
- break;
-
- case 95:
-#line 523 "cc.y"
- {
- (yyval.node) = new(OBREAK, Z, Z);
- }
- break;
-
- case 96:
-#line 527 "cc.y"
- {
- (yyval.node) = new(OCONTINUE, Z, Z);
- }
- break;
-
- case 97:
-#line 531 "cc.y"
- {
- (yyval.node) = new(OGOTO, dcllabel((yyvsp[(2) - (3)].sym), 0), Z);
- }
- break;
-
- case 98:
-#line 535 "cc.y"
- {
- (yyval.node) = new(OUSED, (yyvsp[(3) - (5)].node), Z);
- }
- break;
-
- case 99:
-#line 539 "cc.y"
- {
- (yyval.node) = new(OPREFETCH, (yyvsp[(3) - (5)].node), Z);
- }
- break;
-
- case 100:
-#line 543 "cc.y"
- {
- (yyval.node) = new(OSET, (yyvsp[(3) - (5)].node), Z);
- }
- break;
-
- case 101:
-#line 548 "cc.y"
- {
- (yyval.node) = Z;
- }
- break;
-
- case 103:
-#line 554 "cc.y"
- {
- (yyval.node) = Z;
- }
- break;
-
- case 105:
-#line 561 "cc.y"
- {
- (yyval.node) = new(OCAST, (yyvsp[(1) - (1)].node), Z);
- (yyval.node)->type = types[TLONG];
- }
- break;
-
- case 107:
-#line 569 "cc.y"
- {
- (yyval.node) = new(OCOMMA, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 109:
-#line 576 "cc.y"
- {
- (yyval.node) = new(OMUL, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 110:
-#line 580 "cc.y"
- {
- (yyval.node) = new(ODIV, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 111:
-#line 584 "cc.y"
- {
- (yyval.node) = new(OMOD, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 112:
-#line 588 "cc.y"
- {
- (yyval.node) = new(OADD, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 113:
-#line 592 "cc.y"
- {
- (yyval.node) = new(OSUB, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 114:
-#line 596 "cc.y"
- {
- (yyval.node) = new(OASHR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 115:
-#line 600 "cc.y"
- {
- (yyval.node) = new(OASHL, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 116:
-#line 604 "cc.y"
- {
- (yyval.node) = new(OLT, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 117:
-#line 608 "cc.y"
- {
- (yyval.node) = new(OGT, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 118:
-#line 612 "cc.y"
- {
- (yyval.node) = new(OLE, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 119:
-#line 616 "cc.y"
- {
- (yyval.node) = new(OGE, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 120:
-#line 620 "cc.y"
- {
- (yyval.node) = new(OEQ, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 121:
-#line 624 "cc.y"
- {
- (yyval.node) = new(ONE, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 122:
-#line 628 "cc.y"
- {
- (yyval.node) = new(OAND, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 123:
-#line 632 "cc.y"
- {
- (yyval.node) = new(OXOR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 124:
-#line 636 "cc.y"
- {
- (yyval.node) = new(OOR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 125:
-#line 640 "cc.y"
- {
- (yyval.node) = new(OANDAND, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 126:
-#line 644 "cc.y"
- {
- (yyval.node) = new(OOROR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 127:
-#line 648 "cc.y"
- {
- (yyval.node) = new(OCOND, (yyvsp[(1) - (5)].node), new(OLIST, (yyvsp[(3) - (5)].node), (yyvsp[(5) - (5)].node)));
- }
- break;
-
- case 128:
-#line 652 "cc.y"
- {
- (yyval.node) = new(OAS, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 129:
-#line 656 "cc.y"
- {
- (yyval.node) = new(OASADD, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 130:
-#line 660 "cc.y"
- {
- (yyval.node) = new(OASSUB, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 131:
-#line 664 "cc.y"
- {
- (yyval.node) = new(OASMUL, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 132:
-#line 668 "cc.y"
- {
- (yyval.node) = new(OASDIV, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 133:
-#line 672 "cc.y"
- {
- (yyval.node) = new(OASMOD, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 134:
-#line 676 "cc.y"
- {
- (yyval.node) = new(OASASHL, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 135:
-#line 680 "cc.y"
- {
- (yyval.node) = new(OASASHR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 136:
-#line 684 "cc.y"
- {
- (yyval.node) = new(OASAND, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 137:
-#line 688 "cc.y"
- {
- (yyval.node) = new(OASXOR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 138:
-#line 692 "cc.y"
- {
- (yyval.node) = new(OASOR, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 140:
-#line 699 "cc.y"
- {
- (yyval.node) = new(OCAST, (yyvsp[(5) - (5)].node), Z);
- dodecl(NODECL, CXXX, (yyvsp[(2) - (5)].type), (yyvsp[(3) - (5)].node));
- (yyval.node)->type = lastdcl;
- (yyval.node)->xcast = 1;
- }
- break;
-
- case 141:
-#line 706 "cc.y"
- {
- (yyval.node) = new(OSTRUCT, (yyvsp[(6) - (7)].node), Z);
- dodecl(NODECL, CXXX, (yyvsp[(2) - (7)].type), (yyvsp[(3) - (7)].node));
- (yyval.node)->type = lastdcl;
- }
- break;
-
- case 143:
-#line 715 "cc.y"
- {
- (yyval.node) = new(OIND, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 144:
-#line 719 "cc.y"
- {
- (yyval.node) = new(OADDR, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 145:
-#line 723 "cc.y"
- {
- (yyval.node) = new(OPOS, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 146:
-#line 727 "cc.y"
- {
- (yyval.node) = new(ONEG, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 147:
-#line 731 "cc.y"
- {
- (yyval.node) = new(ONOT, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 148:
-#line 735 "cc.y"
- {
- (yyval.node) = new(OCOM, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 149:
-#line 739 "cc.y"
- {
- (yyval.node) = new(OPREINC, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 150:
-#line 743 "cc.y"
- {
- (yyval.node) = new(OPREDEC, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 151:
-#line 747 "cc.y"
- {
- (yyval.node) = new(OSIZE, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 152:
-#line 751 "cc.y"
- {
- (yyval.node) = new(OSIGN, (yyvsp[(2) - (2)].node), Z);
- }
- break;
-
- case 153:
-#line 757 "cc.y"
- {
- (yyval.node) = (yyvsp[(2) - (3)].node);
- }
- break;
-
- case 154:
-#line 761 "cc.y"
- {
- (yyval.node) = new(OSIZE, Z, Z);
- dodecl(NODECL, CXXX, (yyvsp[(3) - (5)].type), (yyvsp[(4) - (5)].node));
- (yyval.node)->type = lastdcl;
- }
- break;
-
- case 155:
-#line 767 "cc.y"
- {
- (yyval.node) = new(OSIGN, Z, Z);
- dodecl(NODECL, CXXX, (yyvsp[(3) - (5)].type), (yyvsp[(4) - (5)].node));
- (yyval.node)->type = lastdcl;
- }
- break;
-
- case 156:
-#line 773 "cc.y"
- {
- (yyval.node) = new(OFUNC, (yyvsp[(1) - (4)].node), Z);
- if((yyvsp[(1) - (4)].node)->op == ONAME)
- if((yyvsp[(1) - (4)].node)->type == T)
- dodecl(xdecl, CXXX, types[TINT], (yyval.node));
- (yyval.node)->right = invert((yyvsp[(3) - (4)].node));
- }
- break;
-
- case 157:
-#line 781 "cc.y"
- {
- (yyval.node) = new(OIND, new(OADD, (yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].node)), Z);
- }
- break;
-
- case 158:
-#line 785 "cc.y"
- {
- (yyval.node) = new(ODOT, new(OIND, (yyvsp[(1) - (3)].node), Z), Z);
- (yyval.node)->sym = (yyvsp[(3) - (3)].sym);
- }
- break;
-
- case 159:
-#line 790 "cc.y"
- {
- (yyval.node) = new(ODOT, (yyvsp[(1) - (3)].node), Z);
- (yyval.node)->sym = (yyvsp[(3) - (3)].sym);
- }
- break;
-
- case 160:
-#line 795 "cc.y"
- {
- (yyval.node) = new(OPOSTINC, (yyvsp[(1) - (2)].node), Z);
- }
- break;
-
- case 161:
-#line 799 "cc.y"
- {
- (yyval.node) = new(OPOSTDEC, (yyvsp[(1) - (2)].node), Z);
- }
- break;
-
- case 163:
-#line 804 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TINT];
- (yyval.node)->vconst = (yyvsp[(1) - (1)].vval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 164:
-#line 811 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TLONG];
- (yyval.node)->vconst = (yyvsp[(1) - (1)].vval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 165:
-#line 818 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TUINT];
- (yyval.node)->vconst = (yyvsp[(1) - (1)].vval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 166:
-#line 825 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TULONG];
- (yyval.node)->vconst = (yyvsp[(1) - (1)].vval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 167:
-#line 832 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TDOUBLE];
- (yyval.node)->fconst = (yyvsp[(1) - (1)].dval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 168:
-#line 839 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TFLOAT];
- (yyval.node)->fconst = (yyvsp[(1) - (1)].dval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 169:
-#line 846 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TVLONG];
- (yyval.node)->vconst = (yyvsp[(1) - (1)].vval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 170:
-#line 853 "cc.y"
- {
- (yyval.node) = new(OCONST, Z, Z);
- (yyval.node)->type = types[TUVLONG];
- (yyval.node)->vconst = (yyvsp[(1) - (1)].vval);
- (yyval.node)->cstring = strdup(symb);
- }
- break;
-
- case 173:
-#line 864 "cc.y"
- {
- (yyval.node) = new(OSTRING, Z, Z);
- (yyval.node)->type = typ(TARRAY, types[TCHAR]);
- (yyval.node)->type->width = (yyvsp[(1) - (1)].sval).l + 1;
- (yyval.node)->cstring = (yyvsp[(1) - (1)].sval).s;
- (yyval.node)->sym = symstring;
- (yyval.node)->etype = TARRAY;
- (yyval.node)->class = CSTATIC;
- }
- break;
-
- case 174:
-#line 874 "cc.y"
- {
- char *s;
- int n;
-
- n = (yyvsp[(1) - (2)].node)->type->width - 1;
- s = alloc(n+(yyvsp[(2) - (2)].sval).l+MAXALIGN);
-
- memcpy(s, (yyvsp[(1) - (2)].node)->cstring, n);
- memcpy(s+n, (yyvsp[(2) - (2)].sval).s, (yyvsp[(2) - (2)].sval).l);
- s[n+(yyvsp[(2) - (2)].sval).l] = 0;
-
- (yyval.node) = (yyvsp[(1) - (2)].node);
- (yyval.node)->type->width += (yyvsp[(2) - (2)].sval).l;
- (yyval.node)->cstring = s;
- }
- break;
-
- case 175:
-#line 892 "cc.y"
- {
- (yyval.node) = new(OLSTRING, Z, Z);
- (yyval.node)->type = typ(TARRAY, types[TRUNE]);
- (yyval.node)->type->width = (yyvsp[(1) - (1)].sval).l + sizeof(TRune);
- (yyval.node)->rstring = (TRune*)(yyvsp[(1) - (1)].sval).s;
- (yyval.node)->sym = symstring;
- (yyval.node)->etype = TARRAY;
- (yyval.node)->class = CSTATIC;
- }
- break;
-
- case 176:
-#line 902 "cc.y"
- {
- char *s;
- int n;
-
- n = (yyvsp[(1) - (2)].node)->type->width - sizeof(TRune);
- s = alloc(n+(yyvsp[(2) - (2)].sval).l+MAXALIGN);
-
- memcpy(s, (yyvsp[(1) - (2)].node)->rstring, n);
- memcpy(s+n, (yyvsp[(2) - (2)].sval).s, (yyvsp[(2) - (2)].sval).l);
- *(TRune*)(s+n+(yyvsp[(2) - (2)].sval).l) = 0;
-
- (yyval.node) = (yyvsp[(1) - (2)].node);
- (yyval.node)->type->width += (yyvsp[(2) - (2)].sval).l;
- (yyval.node)->rstring = (TRune*)s;
- }
- break;
-
- case 177:
-#line 919 "cc.y"
- {
- (yyval.node) = Z;
- }
- break;
-
- case 180:
-#line 927 "cc.y"
- {
- (yyval.node) = new(OLIST, (yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 181:
-#line 933 "cc.y"
- {
- (yyval.tyty).t1 = strf;
- (yyval.tyty).t2 = strl;
- (yyval.tyty).t3 = lasttype;
- (yyval.tyty).c = lastclass;
- strf = T;
- strl = T;
- lastbit = 0;
- firstbit = 1;
- lastclass = CXXX;
- lasttype = T;
- }
- break;
-
- case 182:
-#line 946 "cc.y"
- {
- (yyval.type) = strf;
- strf = (yyvsp[(2) - (4)].tyty).t1;
- strl = (yyvsp[(2) - (4)].tyty).t2;
- lasttype = (yyvsp[(2) - (4)].tyty).t3;
- lastclass = (yyvsp[(2) - (4)].tyty).c;
- }
- break;
-
- case 183:
-#line 955 "cc.y"
- {
- lastclass = CXXX;
- lasttype = types[TINT];
- }
- break;
-
- case 185:
-#line 963 "cc.y"
- {
- (yyval.tycl).t = (yyvsp[(1) - (1)].type);
- (yyval.tycl).c = CXXX;
- }
- break;
-
- case 186:
-#line 968 "cc.y"
- {
- (yyval.tycl).t = simplet((yyvsp[(1) - (1)].lval));
- (yyval.tycl).c = CXXX;
- }
- break;
-
- case 187:
-#line 973 "cc.y"
- {
- (yyval.tycl).t = simplet((yyvsp[(1) - (1)].lval));
- (yyval.tycl).c = simplec((yyvsp[(1) - (1)].lval));
- (yyval.tycl).t = garbt((yyval.tycl).t, (yyvsp[(1) - (1)].lval));
- }
- break;
-
- case 188:
-#line 979 "cc.y"
- {
- (yyval.tycl).t = (yyvsp[(1) - (2)].type);
- (yyval.tycl).c = simplec((yyvsp[(2) - (2)].lval));
- (yyval.tycl).t = garbt((yyval.tycl).t, (yyvsp[(2) - (2)].lval));
- if((yyvsp[(2) - (2)].lval) & ~BCLASS & ~BGARB)
- diag(Z, "duplicate types given: %T and %Q", (yyvsp[(1) - (2)].type), (yyvsp[(2) - (2)].lval));
- }
- break;
-
- case 189:
-#line 987 "cc.y"
- {
- (yyval.tycl).t = simplet(typebitor((yyvsp[(1) - (2)].lval), (yyvsp[(2) - (2)].lval)));
- (yyval.tycl).c = simplec((yyvsp[(2) - (2)].lval));
- (yyval.tycl).t = garbt((yyval.tycl).t, (yyvsp[(2) - (2)].lval));
- }
- break;
-
- case 190:
-#line 993 "cc.y"
- {
- (yyval.tycl).t = (yyvsp[(2) - (3)].type);
- (yyval.tycl).c = simplec((yyvsp[(1) - (3)].lval));
- (yyval.tycl).t = garbt((yyval.tycl).t, (yyvsp[(1) - (3)].lval)|(yyvsp[(3) - (3)].lval));
- }
- break;
-
- case 191:
-#line 999 "cc.y"
- {
- (yyval.tycl).t = simplet((yyvsp[(2) - (2)].lval));
- (yyval.tycl).c = simplec((yyvsp[(1) - (2)].lval));
- (yyval.tycl).t = garbt((yyval.tycl).t, (yyvsp[(1) - (2)].lval));
- }
- break;
-
- case 192:
-#line 1005 "cc.y"
- {
- (yyval.tycl).t = simplet(typebitor((yyvsp[(2) - (3)].lval), (yyvsp[(3) - (3)].lval)));
- (yyval.tycl).c = simplec((yyvsp[(1) - (3)].lval)|(yyvsp[(3) - (3)].lval));
- (yyval.tycl).t = garbt((yyval.tycl).t, (yyvsp[(1) - (3)].lval)|(yyvsp[(3) - (3)].lval));
- }
- break;
-
- case 193:
-#line 1013 "cc.y"
- {
- (yyval.type) = (yyvsp[(1) - (1)].tycl).t;
- if((yyvsp[(1) - (1)].tycl).c != CXXX)
- diag(Z, "illegal combination of class 4: %s", cnames[(yyvsp[(1) - (1)].tycl).c]);
- }
- break;
-
- case 194:
-#line 1021 "cc.y"
- {
- lasttype = (yyvsp[(1) - (1)].tycl).t;
- lastclass = (yyvsp[(1) - (1)].tycl).c;
- }
- break;
-
- case 195:
-#line 1028 "cc.y"
- {
- dotag((yyvsp[(2) - (2)].sym), TSTRUCT, 0);
- (yyval.type) = (yyvsp[(2) - (2)].sym)->suetag;
- }
- break;
-
- case 196:
-#line 1033 "cc.y"
- {
- dotag((yyvsp[(2) - (2)].sym), TSTRUCT, autobn);
- }
- break;
-
- case 197:
-#line 1037 "cc.y"
- {
- (yyval.type) = (yyvsp[(2) - (4)].sym)->suetag;
- if((yyval.type)->link != T)
- diag(Z, "redeclare tag: %s", (yyvsp[(2) - (4)].sym)->name);
- (yyval.type)->link = (yyvsp[(4) - (4)].type);
- sualign((yyval.type));
- }
- break;
-
- case 198:
-#line 1045 "cc.y"
- {
- diag(Z, "struct must have tag");
- taggen++;
- sprint(symb, "_%d_", taggen);
- (yyval.type) = dotag(lookup(), TSTRUCT, autobn);
- (yyval.type)->link = (yyvsp[(2) - (2)].type);
- sualign((yyval.type));
- }
- break;
-
- case 199:
-#line 1054 "cc.y"
- {
- dotag((yyvsp[(2) - (2)].sym), TUNION, 0);
- (yyval.type) = (yyvsp[(2) - (2)].sym)->suetag;
- }
- break;
-
- case 200:
-#line 1059 "cc.y"
- {
- dotag((yyvsp[(2) - (2)].sym), TUNION, autobn);
- }
- break;
-
- case 201:
-#line 1063 "cc.y"
- {
- (yyval.type) = (yyvsp[(2) - (4)].sym)->suetag;
- if((yyval.type)->link != T)
- diag(Z, "redeclare tag: %s", (yyvsp[(2) - (4)].sym)->name);
- (yyval.type)->link = (yyvsp[(4) - (4)].type);
- sualign((yyval.type));
- }
- break;
-
- case 202:
-#line 1071 "cc.y"
- {
- taggen++;
- sprint(symb, "_%d_", taggen);
- (yyval.type) = dotag(lookup(), TUNION, autobn);
- (yyval.type)->link = (yyvsp[(2) - (2)].type);
- sualign((yyval.type));
- }
- break;
-
- case 203:
-#line 1079 "cc.y"
- {
- dotag((yyvsp[(2) - (2)].sym), TENUM, 0);
- (yyval.type) = (yyvsp[(2) - (2)].sym)->suetag;
- if((yyval.type)->link == T)
- (yyval.type)->link = types[TINT];
- (yyval.type) = (yyval.type)->link;
- }
- break;
-
- case 204:
-#line 1087 "cc.y"
- {
- dotag((yyvsp[(2) - (2)].sym), TENUM, autobn);
- }
- break;
-
- case 205:
-#line 1091 "cc.y"
- {
- en.tenum = T;
- en.cenum = T;
- }
- break;
-
- case 206:
-#line 1096 "cc.y"
- {
- (yyval.type) = (yyvsp[(2) - (7)].sym)->suetag;
- if((yyval.type)->link != T)
- diag(Z, "redeclare tag: %s", (yyvsp[(2) - (7)].sym)->name);
- if(en.tenum == T) {
- diag(Z, "enum type ambiguous: %s", (yyvsp[(2) - (7)].sym)->name);
- en.tenum = types[TINT];
- }
- (yyval.type)->link = en.tenum;
- (yyval.type) = en.tenum;
- }
- break;
-
- case 207:
-#line 1108 "cc.y"
- {
- en.tenum = T;
- en.cenum = T;
- }
- break;
-
- case 208:
-#line 1113 "cc.y"
- {
- (yyval.type) = en.tenum;
- }
- break;
-
- case 209:
-#line 1117 "cc.y"
- {
- (yyval.type) = tcopy((yyvsp[(1) - (1)].sym)->type);
- }
- break;
-
- case 211:
-#line 1124 "cc.y"
- {
- (yyval.lval) = typebitor((yyvsp[(1) - (2)].lval), (yyvsp[(2) - (2)].lval));
- }
- break;
-
- case 212:
-#line 1129 "cc.y"
- {
- (yyval.lval) = 0;
- }
- break;
-
- case 213:
-#line 1133 "cc.y"
- {
- (yyval.lval) = typebitor((yyvsp[(1) - (2)].lval), (yyvsp[(2) - (2)].lval));
- }
- break;
-
- case 218:
-#line 1145 "cc.y"
- {
- (yyval.lval) = typebitor((yyvsp[(1) - (2)].lval), (yyvsp[(2) - (2)].lval));
- }
- break;
-
- case 221:
-#line 1155 "cc.y"
- {
- doenum((yyvsp[(1) - (1)].sym), Z);
- }
- break;
-
- case 222:
-#line 1159 "cc.y"
- {
- doenum((yyvsp[(1) - (3)].sym), (yyvsp[(3) - (3)].node));
- }
- break;
-
- case 225:
-#line 1166 "cc.y"
- { (yyval.lval) = BCHAR; }
- break;
-
- case 226:
-#line 1167 "cc.y"
- { (yyval.lval) = BSHORT; }
- break;
-
- case 227:
-#line 1168 "cc.y"
- { (yyval.lval) = BINT; }
- break;
-
- case 228:
-#line 1169 "cc.y"
- { (yyval.lval) = BLONG; }
- break;
-
- case 229:
-#line 1170 "cc.y"
- { (yyval.lval) = BSIGNED; }
- break;
-
- case 230:
-#line 1171 "cc.y"
- { (yyval.lval) = BUNSIGNED; }
- break;
-
- case 231:
-#line 1172 "cc.y"
- { (yyval.lval) = BFLOAT; }
- break;
-
- case 232:
-#line 1173 "cc.y"
- { (yyval.lval) = BDOUBLE; }
- break;
-
- case 233:
-#line 1174 "cc.y"
- { (yyval.lval) = BVOID; }
- break;
-
- case 234:
-#line 1177 "cc.y"
- { (yyval.lval) = BAUTO; }
- break;
-
- case 235:
-#line 1178 "cc.y"
- { (yyval.lval) = BSTATIC; }
- break;
-
- case 236:
-#line 1179 "cc.y"
- { (yyval.lval) = BEXTERN; }
- break;
-
- case 237:
-#line 1180 "cc.y"
- { (yyval.lval) = BTYPEDEF; }
- break;
-
- case 238:
-#line 1181 "cc.y"
- { (yyval.lval) = BTYPESTR; }
- break;
-
- case 239:
-#line 1182 "cc.y"
- { (yyval.lval) = BREGISTER; }
- break;
-
- case 240:
-#line 1183 "cc.y"
- { (yyval.lval) = 0; }
- break;
-
- case 241:
-#line 1186 "cc.y"
- { (yyval.lval) = BCONSTNT; }
- break;
-
- case 242:
-#line 1187 "cc.y"
- { (yyval.lval) = BVOLATILE; }
- break;
-
- case 243:
-#line 1188 "cc.y"
- { (yyval.lval) = 0; }
- break;
-
- case 244:
-#line 1192 "cc.y"
- {
- (yyval.node) = new(ONAME, Z, Z);
- if((yyvsp[(1) - (1)].sym)->class == CLOCAL)
- (yyvsp[(1) - (1)].sym) = mkstatic((yyvsp[(1) - (1)].sym));
- (yyval.node)->sym = (yyvsp[(1) - (1)].sym);
- (yyval.node)->type = (yyvsp[(1) - (1)].sym)->type;
- (yyval.node)->etype = TVOID;
- if((yyval.node)->type != T)
- (yyval.node)->etype = (yyval.node)->type->etype;
- (yyval.node)->xoffset = (yyvsp[(1) - (1)].sym)->offset;
- (yyval.node)->class = (yyvsp[(1) - (1)].sym)->class;
- (yyvsp[(1) - (1)].sym)->aused = 1;
- }
- break;
-
- case 245:
-#line 1207 "cc.y"
- {
- (yyval.node) = new(ONAME, Z, Z);
- (yyval.node)->sym = (yyvsp[(1) - (1)].sym);
- (yyval.node)->type = (yyvsp[(1) - (1)].sym)->type;
- (yyval.node)->etype = TVOID;
- if((yyval.node)->type != T)
- (yyval.node)->etype = (yyval.node)->type->etype;
- (yyval.node)->xoffset = (yyvsp[(1) - (1)].sym)->offset;
- (yyval.node)->class = (yyvsp[(1) - (1)].sym)->class;
- }
- break;
-
-
-/* Line 1267 of yacc.c. */
-#line 3607 "y.tab.c"
- default: break;
- }
- YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
-
- YYPOPSTACK (yylen);
- yylen = 0;
- YY_STACK_PRINT (yyss, yyssp);
-
- *++yyvsp = yyval;
-
-
- /* Now `shift' the result of the reduction. Determine what state
- that goes to, based on the state we popped back to and the rule
- number reduced by. */
-
- yyn = yyr1[yyn];
-
- yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
- if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
- yystate = yytable[yystate];
- else
- yystate = yydefgoto[yyn - YYNTOKENS];
-
- goto yynewstate;
-
-
-/*------------------------------------.
-| yyerrlab -- here on detecting error |
-`------------------------------------*/
-yyerrlab:
- /* If not already recovering from an error, report this error. */
- if (!yyerrstatus)
- {
- ++yynerrs;
-#if ! YYERROR_VERBOSE
- yyerror (YY_("syntax error"));
-#else
- {
- YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
- if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
- {
- YYSIZE_T yyalloc = 2 * yysize;
- if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
- yyalloc = YYSTACK_ALLOC_MAXIMUM;
- if (yymsg != yymsgbuf)
- YYSTACK_FREE (yymsg);
- yymsg = (char *) YYSTACK_ALLOC (yyalloc);
- if (yymsg)
- yymsg_alloc = yyalloc;
- else
- {
- yymsg = yymsgbuf;
- yymsg_alloc = sizeof yymsgbuf;
- }
- }
-
- if (0 < yysize && yysize <= yymsg_alloc)
- {
- (void) yysyntax_error (yymsg, yystate, yychar);
- yyerror (yymsg);
- }
- else
- {
- yyerror (YY_("syntax error"));
- if (yysize != 0)
- goto yyexhaustedlab;
- }
- }
-#endif
- }
-
-
-
- if (yyerrstatus == 3)
- {
- /* If just tried and failed to reuse look-ahead token after an
- error, discard it. */
-
- if (yychar <= YYEOF)
- {
- /* Return failure if at end of input. */
- if (yychar == YYEOF)
- YYABORT;
- }
- else
- {
- yydestruct ("Error: discarding",
- yytoken, &yylval);
- yychar = YYEMPTY;
- }
- }
-
- /* Else will try to reuse look-ahead token after shifting the error
- token. */
- goto yyerrlab1;
-
-
-/*---------------------------------------------------.
-| yyerrorlab -- error raised explicitly by YYERROR. |
-`---------------------------------------------------*/
-yyerrorlab:
-
- /* Pacify compilers like GCC when the user code never invokes
- YYERROR and the label yyerrorlab therefore never appears in user
- code. */
- if (/*CONSTCOND*/ 0)
- goto yyerrorlab;
-
- /* Do not reclaim the symbols of the rule which action triggered
- this YYERROR. */
- YYPOPSTACK (yylen);
- yylen = 0;
- YY_STACK_PRINT (yyss, yyssp);
- yystate = *yyssp;
- goto yyerrlab1;
-
-
-/*-------------------------------------------------------------.
-| yyerrlab1 -- common code for both syntax error and YYERROR. |
-`-------------------------------------------------------------*/
-yyerrlab1:
- yyerrstatus = 3; /* Each real token shifted decrements this. */
-
- for (;;)
- {
- yyn = yypact[yystate];
- if (yyn != YYPACT_NINF)
- {
- yyn += YYTERROR;
- if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
- {
- yyn = yytable[yyn];
- if (0 < yyn)
- break;
- }
- }
-
- /* Pop the current state because it cannot handle the error token. */
- if (yyssp == yyss)
- YYABORT;
-
-
- yydestruct ("Error: popping",
- yystos[yystate], yyvsp);
- YYPOPSTACK (1);
- yystate = *yyssp;
- YY_STACK_PRINT (yyss, yyssp);
- }
-
- if (yyn == YYFINAL)
- YYACCEPT;
-
- *++yyvsp = yylval;
-
-
- /* Shift the error token. */
- YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
-
- yystate = yyn;
- goto yynewstate;
-
-
-/*-------------------------------------.
-| yyacceptlab -- YYACCEPT comes here. |
-`-------------------------------------*/
-yyacceptlab:
- yyresult = 0;
- goto yyreturn;
-
-/*-----------------------------------.
-| yyabortlab -- YYABORT comes here. |
-`-----------------------------------*/
-yyabortlab:
- yyresult = 1;
- goto yyreturn;
-
-#ifndef yyoverflow
-/*-------------------------------------------------.
-| yyexhaustedlab -- memory exhaustion comes here. |
-`-------------------------------------------------*/
-yyexhaustedlab:
- yyerror (YY_("memory exhausted"));
- yyresult = 2;
- /* Fall through. */
-#endif
-
-yyreturn:
- if (yychar != YYEOF && yychar != YYEMPTY)
- yydestruct ("Cleanup: discarding lookahead",
- yytoken, &yylval);
- /* Do not reclaim the symbols of the rule which action triggered
- this YYABORT or YYACCEPT. */
- YYPOPSTACK (yylen);
- YY_STACK_PRINT (yyss, yyssp);
- while (yyssp != yyss)
- {
- yydestruct ("Cleanup: popping",
- yystos[*yyssp], yyvsp);
- YYPOPSTACK (1);
- }
-#ifndef yyoverflow
- if (yyss != yyssa)
- YYSTACK_FREE (yyss);
-#endif
-#if YYERROR_VERBOSE
- if (yymsg != yymsgbuf)
- YYSTACK_FREE (yymsg);
-#endif
- /* Make sure YYID is used. */
- return YYID (yyresult);
-}
-
-
-#line 1220 "cc.y"
-
-
diff --git a/src/cmd/cc/y.tab.h b/src/cmd/cc/y.tab.h
deleted file mode 100644
index 32daca9b6..000000000
--- a/src/cmd/cc/y.tab.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/* A Bison parser, made by GNU Bison 2.3. */
-
-/* Skeleton interface for Bison's Yacc-like parsers in C
-
- Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
- Free Software Foundation, Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA. */
-
-/* As a special exception, you may create a larger work that contains
- part or all of the Bison parser skeleton and distribute that work
- under terms of your choice, so long as that work isn't itself a
- parser generator using the skeleton or a modified version thereof
- as a parser skeleton. Alternatively, if you modify or redistribute
- the parser skeleton itself, you may (at your option) remove this
- special exception, which will cause the skeleton and the resulting
- Bison output files to be licensed under the GNU General Public
- License without this special exception.
-
- This special exception was added by the Free Software Foundation in
- version 2.2 of Bison. */
-
-/* Tokens. */
-#ifndef YYTOKENTYPE
-# define YYTOKENTYPE
- /* Put the tokens into the symbol table, so that GDB and other debuggers
- know about them. */
- enum yytokentype {
- LORE = 258,
- LXORE = 259,
- LANDE = 260,
- LLSHE = 261,
- LRSHE = 262,
- LMDE = 263,
- LDVE = 264,
- LMLE = 265,
- LME = 266,
- LPE = 267,
- LOROR = 268,
- LANDAND = 269,
- LNE = 270,
- LEQ = 271,
- LGE = 272,
- LLE = 273,
- LRSH = 274,
- LLSH = 275,
- LMG = 276,
- LPP = 277,
- LMM = 278,
- LNAME = 279,
- LTYPE = 280,
- LFCONST = 281,
- LDCONST = 282,
- LCONST = 283,
- LLCONST = 284,
- LUCONST = 285,
- LULCONST = 286,
- LVLCONST = 287,
- LUVLCONST = 288,
- LSTRING = 289,
- LLSTRING = 290,
- LAUTO = 291,
- LBREAK = 292,
- LCASE = 293,
- LCHAR = 294,
- LCONTINUE = 295,
- LDEFAULT = 296,
- LDO = 297,
- LDOUBLE = 298,
- LELSE = 299,
- LEXTERN = 300,
- LFLOAT = 301,
- LFOR = 302,
- LGOTO = 303,
- LIF = 304,
- LINT = 305,
- LLONG = 306,
- LPREFETCH = 307,
- LREGISTER = 308,
- LRETURN = 309,
- LSHORT = 310,
- LSIZEOF = 311,
- LUSED = 312,
- LSTATIC = 313,
- LSTRUCT = 314,
- LSWITCH = 315,
- LTYPEDEF = 316,
- LTYPESTR = 317,
- LUNION = 318,
- LUNSIGNED = 319,
- LWHILE = 320,
- LVOID = 321,
- LENUM = 322,
- LSIGNED = 323,
- LCONSTNT = 324,
- LVOLATILE = 325,
- LSET = 326,
- LSIGNOF = 327,
- LRESTRICT = 328,
- LINLINE = 329
- };
-#endif
-/* Tokens. */
-#define LORE 258
-#define LXORE 259
-#define LANDE 260
-#define LLSHE 261
-#define LRSHE 262
-#define LMDE 263
-#define LDVE 264
-#define LMLE 265
-#define LME 266
-#define LPE 267
-#define LOROR 268
-#define LANDAND 269
-#define LNE 270
-#define LEQ 271
-#define LGE 272
-#define LLE 273
-#define LRSH 274
-#define LLSH 275
-#define LMG 276
-#define LPP 277
-#define LMM 278
-#define LNAME 279
-#define LTYPE 280
-#define LFCONST 281
-#define LDCONST 282
-#define LCONST 283
-#define LLCONST 284
-#define LUCONST 285
-#define LULCONST 286
-#define LVLCONST 287
-#define LUVLCONST 288
-#define LSTRING 289
-#define LLSTRING 290
-#define LAUTO 291
-#define LBREAK 292
-#define LCASE 293
-#define LCHAR 294
-#define LCONTINUE 295
-#define LDEFAULT 296
-#define LDO 297
-#define LDOUBLE 298
-#define LELSE 299
-#define LEXTERN 300
-#define LFLOAT 301
-#define LFOR 302
-#define LGOTO 303
-#define LIF 304
-#define LINT 305
-#define LLONG 306
-#define LPREFETCH 307
-#define LREGISTER 308
-#define LRETURN 309
-#define LSHORT 310
-#define LSIZEOF 311
-#define LUSED 312
-#define LSTATIC 313
-#define LSTRUCT 314
-#define LSWITCH 315
-#define LTYPEDEF 316
-#define LTYPESTR 317
-#define LUNION 318
-#define LUNSIGNED 319
-#define LWHILE 320
-#define LVOID 321
-#define LENUM 322
-#define LSIGNED 323
-#define LCONSTNT 324
-#define LVOLATILE 325
-#define LSET 326
-#define LSIGNOF 327
-#define LRESTRICT 328
-#define LINLINE 329
-
-
-
-
-#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef union YYSTYPE
-#line 36 "cc.y"
-{
- Node* node;
- Sym* sym;
- Type* type;
- struct
- {
- Type* t;
- uchar c;
- } tycl;
- struct
- {
- Type* t1;
- Type* t2;
- Type* t3;
- uchar c;
- } tyty;
- struct
- {
- char* s;
- int32 l;
- } sval;
- int32 lval;
- double dval;
- vlong vval;
-}
-/* Line 1529 of yacc.c. */
-#line 223 "y.tab.h"
- YYSTYPE;
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
-# define YYSTYPE_IS_DECLARED 1
-# define YYSTYPE_IS_TRIVIAL 1
-#endif
-
-extern YYSTYPE yylval;
-
diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go
index 0dc22dcd4..28ded816d 100644
--- a/src/cmd/cgo/main.go
+++ b/src/cmd/cgo/main.go
@@ -155,6 +155,7 @@ var fset = token.NewFileSet()
var dynobj = flag.String("dynimport", "", "if non-empty, print dynamic import data for that file")
var dynout = flag.String("dynout", "", "write -dynobj output to this file")
+var dynpackage = flag.String("dynpackage", "main", "set Go package for dynobj output")
var dynlinker = flag.Bool("dynlinker", false, "record dynamic linker information in dynimport mode")
// These flags are for bootstrapping a new Go implementation,
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index d92bed9bf..78ecfd397 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -13,6 +13,7 @@ import (
"go/ast"
"go/printer"
"go/token"
+ "io"
"os"
"sort"
"strings"
@@ -23,8 +24,15 @@ var conf = printer.Config{Mode: printer.SourcePos, Tabwidth: 8}
// writeDefs creates output files to be compiled by 6g, 6c, and gcc.
// (The comments here say 6g and 6c but the code applies to the 8 and 5 tools too.)
func (p *Package) writeDefs() {
- fgo2 := creat(*objDir + "_cgo_gotypes.go")
- fc := creat(*objDir + "_cgo_defun.c")
+ var fgo2, fc io.Writer
+ f := creat(*objDir + "_cgo_gotypes.go")
+ defer f.Close()
+ fgo2 = f
+ if *gccgo {
+ f := creat(*objDir + "_cgo_defun.c")
+ defer f.Close()
+ fc = f
+ }
fm := creat(*objDir + "_cgo_main.c")
var gccgoInit bytes.Buffer
@@ -34,7 +42,7 @@ func (p *Package) writeDefs() {
fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, strings.Join(v, " "))
if k == "LDFLAGS" && !*gccgo {
for _, arg := range v {
- fmt.Fprintf(fc, "#pragma cgo_ldflag %q\n", arg)
+ fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
}
}
}
@@ -88,7 +96,6 @@ func (p *Package) writeDefs() {
if *gccgo {
fmt.Fprint(fc, p.cPrologGccgo())
} else {
- fmt.Fprint(fc, cProlog)
fmt.Fprint(fgo2, goProlog)
}
@@ -104,42 +111,42 @@ func (p *Package) writeDefs() {
if !cVars[n.C] {
fmt.Fprintf(fm, "extern char %s[];\n", n.C)
fmt.Fprintf(fm, "void *_cgohack_%s = %s;\n\n", n.C, n.C)
-
- if !*gccgo {
- fmt.Fprintf(fc, "#pragma cgo_import_static %s\n", n.C)
+ if *gccgo {
+ fmt.Fprintf(fc, "extern byte *%s;\n", n.C)
+ } else {
+ fmt.Fprintf(fgo2, "//go:linkname __cgo_%s %s\n", n.C, n.C)
+ fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", n.C)
+ fmt.Fprintf(fgo2, "var __cgo_%s byte\n", n.C)
}
-
- fmt.Fprintf(fc, "extern byte *%s;\n", n.C)
-
cVars[n.C] = true
}
- var amp string
+
var node ast.Node
if n.Kind == "var" {
- amp = "&"
node = &ast.StarExpr{X: n.Type.Go}
} else if n.Kind == "fpvar" {
node = n.Type.Go
- if *gccgo {
- amp = "&"
- }
} else {
panic(fmt.Errorf("invalid var kind %q", n.Kind))
}
if *gccgo {
fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, n.Mangle)
- fmt.Fprintf(&gccgoInit, "\t%s = %s%s;\n", n.Mangle, amp, n.C)
- } else {
- fmt.Fprintf(fc, "#pragma dataflag NOPTR /* C pointer, not heap pointer */ \n")
- fmt.Fprintf(fc, "void *·%s = %s%s;\n", n.Mangle, amp, n.C)
+ fmt.Fprintf(&gccgoInit, "\t%s = &%s;\n", n.Mangle, n.C)
+ fmt.Fprintf(fc, "\n")
}
- fmt.Fprintf(fc, "\n")
fmt.Fprintf(fgo2, "var %s ", n.Mangle)
conf.Fprint(fgo2, fset, node)
+ if !*gccgo {
+ fmt.Fprintf(fgo2, " = (")
+ conf.Fprint(fgo2, fset, node)
+ fmt.Fprintf(fgo2, ")(unsafe.Pointer(&__cgo_%s))", n.C)
+ }
fmt.Fprintf(fgo2, "\n")
}
- fmt.Fprintf(fc, "\n")
+ if *gccgo {
+ fmt.Fprintf(fc, "\n")
+ }
for _, key := range nameKeys(p.Name) {
n := p.Name[key]
@@ -169,9 +176,6 @@ func (p *Package) writeDefs() {
fmt.Fprint(fc, init)
fmt.Fprintln(fc, "}")
}
-
- fgo2.Close()
- fc.Close()
}
func dynimport(obj string) {
@@ -184,13 +188,15 @@ func dynimport(obj string) {
stdout = f
}
+ fmt.Fprintf(stdout, "package %s\n", *dynpackage)
+
if f, err := elf.Open(obj); err == nil {
if *dynlinker {
// Emit the cgo_dynamic_linker line.
if sec := f.Section(".interp"); sec != nil {
if data, err := sec.Data(); err == nil && len(data) > 1 {
// skip trailing \0 in data
- fmt.Fprintf(stdout, "#pragma cgo_dynamic_linker %q\n", string(data[:len(data)-1]))
+ fmt.Fprintf(stdout, "//go:cgo_dynamic_linker %q\n", string(data[:len(data)-1]))
}
}
}
@@ -203,14 +209,14 @@ func dynimport(obj string) {
if s.Version != "" {
targ += "#" + s.Version
}
- fmt.Fprintf(stdout, "#pragma cgo_import_dynamic %s %s %q\n", s.Name, targ, s.Library)
+ fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, targ, s.Library)
}
lib, err := f.ImportedLibraries()
if err != nil {
fatalf("cannot load imported libraries from ELF file %s: %v", obj, err)
}
for _, l := range lib {
- fmt.Fprintf(stdout, "#pragma cgo_import_dynamic _ _ %q\n", l)
+ fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
@@ -224,14 +230,14 @@ func dynimport(obj string) {
if len(s) > 0 && s[0] == '_' {
s = s[1:]
}
- fmt.Fprintf(stdout, "#pragma cgo_import_dynamic %s %s %q\n", s, s, "")
+ fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s, s, "")
}
lib, err := f.ImportedLibraries()
if err != nil {
fatalf("cannot load imported libraries from Mach-O file %s: %v", obj, err)
}
for _, l := range lib {
- fmt.Fprintf(stdout, "#pragma cgo_import_dynamic _ _ %q\n", l)
+ fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l)
}
return
}
@@ -244,7 +250,7 @@ func dynimport(obj string) {
for _, s := range sym {
ss := strings.Split(s, ":")
name := strings.Split(ss[0], "@")[0]
- fmt.Fprintf(stdout, "#pragma cgo_import_dynamic %s %s %q\n", name, ss[0], strings.ToLower(ss[1]))
+ fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", name, ss[0], strings.ToLower(ss[1]))
}
return
}
@@ -304,7 +310,7 @@ func (p *Package) structType(n *Name) (string, int64) {
return buf.String(), off
}
-func (p *Package) writeDefsFunc(fc, fgo2 *os.File, n *Name) {
+func (p *Package) writeDefsFunc(fc, fgo2 io.Writer, n *Name) {
name := n.Go
gtype := n.FuncType.Go
void := gtype.Results == nil || len(gtype.Results.List) == 0
@@ -397,10 +403,10 @@ func (p *Package) writeDefsFunc(fc, fgo2 *os.File, n *Name) {
}
// C wrapper calls into gcc, passing a pointer to the argument frame.
- fmt.Fprintf(fc, "#pragma cgo_import_static %s\n", cname)
- fmt.Fprintf(fc, "void %s(void*);\n", cname)
- fmt.Fprintf(fc, "#pragma dataflag NOPTR\n")
- fmt.Fprintf(fc, "void *·%s = %s;\n", cname, cname)
+ fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", cname)
+ fmt.Fprintf(fgo2, "//go:linkname __cgofn_%s %s\n", cname, cname)
+ fmt.Fprintf(fgo2, "var __cgofn_%s byte\n", cname)
+ fmt.Fprintf(fgo2, "var %s = unsafe.Pointer(&__cgofn_%s)\n", cname, cname)
nret := 0
if !void {
@@ -412,7 +418,6 @@ func (p *Package) writeDefsFunc(fc, fgo2 *os.File, n *Name) {
}
fmt.Fprint(fgo2, "\n")
- fmt.Fprintf(fgo2, "var %s unsafe.Pointer\n", cname)
conf.Fprint(fgo2, fset, d)
fmt.Fprint(fgo2, " {\n")
@@ -626,7 +631,7 @@ func (p *Package) packedAttribute() string {
// Write out the various stubs we need to support functions exported
// from Go so that they are callable from C.
-func (p *Package) writeExports(fgo2, fc, fm *os.File) {
+func (p *Package) writeExports(fgo2, fc, fm io.Writer) {
fgcc := creat(*objDir + "_cgo_export.c")
fgcch := creat(*objDir + "_cgo_export.h")
@@ -763,15 +768,15 @@ func (p *Package) writeExports(fgo2, fc, fm *os.File) {
if fn.Recv != nil {
goname = "_cgoexpwrap" + cPrefix + "_" + fn.Recv.List[0].Names[0].Name + "_" + goname
}
- fmt.Fprintf(fc, "#pragma cgo_export_dynamic %s\n", goname)
- fmt.Fprintf(fc, "extern void ·%s();\n\n", goname)
- fmt.Fprintf(fc, "#pragma cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
- fmt.Fprintf(fc, "#pragma textflag 7\n") // no split stack, so no use of m or g
- fmt.Fprintf(fc, "void\n")
- fmt.Fprintf(fc, "_cgoexp%s_%s(void *a, int32 n)\n", cPrefix, exp.ExpName)
- fmt.Fprintf(fc, "{\n")
- fmt.Fprintf(fc, "\truntime·cgocallback(·%s, a, n);\n", goname)
- fmt.Fprintf(fc, "}\n")
+ fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", goname)
+ fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName)
+ fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
+ fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
+ fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32) {", cPrefix, exp.ExpName)
+ fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
+ // The indirect here is converting from a Go function pointer to a C function pointer.
+ fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n));\n")
+ fmt.Fprintf(fgo2, "}\n")
fmt.Fprintf(fm, "int _cgoexp%s_%s;\n", cPrefix, exp.ExpName)
@@ -817,7 +822,7 @@ func (p *Package) writeExports(fgo2, fc, fm *os.File) {
}
// Write out the C header allowing C code to call exported gccgo functions.
-func (p *Package) writeGccgoExports(fgo2, fc, fm *os.File) {
+func (p *Package) writeGccgoExports(fgo2, fc, fm io.Writer) {
fgcc := creat(*objDir + "_cgo_export.c")
fgcch := creat(*objDir + "_cgo_export.h")
@@ -1164,60 +1169,39 @@ char *CString(_GoString_);
void *_CMalloc(size_t);
`
-const cProlog = `
-#include "runtime.h"
-#include "cgocall.h"
-#include "textflag.h"
-
-#pragma dataflag NOPTR
-static void *cgocall_errno = runtime·cgocall_errno;
-#pragma dataflag NOPTR
-void *·_cgo_runtime_cgocall_errno = &cgocall_errno;
-
-#pragma dataflag NOPTR
-static void *runtime_gostring = runtime·gostring;
-#pragma dataflag NOPTR
-void *·_cgo_runtime_gostring = &runtime_gostring;
-
-#pragma dataflag NOPTR
-static void *runtime_gostringn = runtime·gostringn;
-#pragma dataflag NOPTR
-void *·_cgo_runtime_gostringn = &runtime_gostringn;
-
-#pragma dataflag NOPTR
-static void *runtime_gobytes = runtime·gobytes;
-#pragma dataflag NOPTR
-void *·_cgo_runtime_gobytes = &runtime_gobytes;
-
-#pragma dataflag NOPTR
-static void *runtime_cmalloc = runtime·cmalloc;
-#pragma dataflag NOPTR
-void *·_cgo_runtime_cmalloc = &runtime_cmalloc;
-
-void ·_Cerrno(void*, int32);
-`
-
const goProlog = `
-var _cgo_runtime_cgocall_errno func(unsafe.Pointer, uintptr) int32
-var _cgo_runtime_cmalloc func(uintptr) unsafe.Pointer
+//go:linkname _cgo_runtime_cgocall_errno runtime.cgocall_errno
+func _cgo_runtime_cgocall_errno(unsafe.Pointer, uintptr) int32
+
+//go:linkname _cgo_runtime_cmalloc runtime.cmalloc
+func _cgo_runtime_cmalloc(uintptr) unsafe.Pointer
+
+//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
+func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr)
`
const goStringDef = `
-var _cgo_runtime_gostring func(*_Ctype_char) string
+//go:linkname _cgo_runtime_gostring runtime.gostring
+func _cgo_runtime_gostring(*_Ctype_char) string
+
func _Cfunc_GoString(p *_Ctype_char) string {
return _cgo_runtime_gostring(p)
}
`
const goStringNDef = `
-var _cgo_runtime_gostringn func(*_Ctype_char, int) string
+//go:linkname _cgo_runtime_gostringn runtime.gostringn
+func _cgo_runtime_gostringn(*_Ctype_char, int) string
+
func _Cfunc_GoStringN(p *_Ctype_char, l _Ctype_int) string {
return _cgo_runtime_gostringn(p, int(l))
}
`
const goBytesDef = `
-var _cgo_runtime_gobytes func(unsafe.Pointer, int) []byte
+//go:linkname _cgo_runtime_gobytes runtime.gobytes
+func _cgo_runtime_gobytes(unsafe.Pointer, int) []byte
+
func _Cfunc_GoBytes(p unsafe.Pointer, l _Ctype_int) []byte {
return _cgo_runtime_gobytes(p, int(l))
}
diff --git a/src/cmd/dist/build.c b/src/cmd/dist/build.c
index 9c81dd8b2..e4f307bee 100644
--- a/src/cmd/dist/build.c
+++ b/src/cmd/dist/build.c
@@ -526,10 +526,6 @@ static struct {
"anames8.c",
"anames9.c",
}},
- {"cmd/cc", {
- "-pgen.c",
- "-pswt.c",
- }},
{"cmd/gc", {
"-cplx.c",
"-pgen.c",
@@ -538,26 +534,6 @@ static struct {
"-y1.tab.c", // makefile dreg
"opnames.h",
}},
- {"cmd/5c", {
- "../cc/pgen.c",
- "../cc/pswt.c",
- "$GOROOT/pkg/obj/$GOHOSTOS_$GOHOSTARCH/libcc.a",
- }},
- {"cmd/6c", {
- "../cc/pgen.c",
- "../cc/pswt.c",
- "$GOROOT/pkg/obj/$GOHOSTOS_$GOHOSTARCH/libcc.a",
- }},
- {"cmd/8c", {
- "../cc/pgen.c",
- "../cc/pswt.c",
- "$GOROOT/pkg/obj/$GOHOSTOS_$GOHOSTARCH/libcc.a",
- }},
- {"cmd/9c", {
- "../cc/pgen.c",
- "../cc/pswt.c",
- "$GOROOT/pkg/obj/$GOHOSTOS_$GOHOSTARCH/libcc.a",
- }},
{"cmd/5g", {
"../gc/cplx.c",
"../gc/pgen.c",
@@ -611,12 +587,10 @@ static struct {
"$GOROOT/pkg/obj/$GOHOSTOS_$GOHOSTARCH/lib9.a",
}},
{"runtime", {
- "zaexperiment.h", // must sort above zasm
- "zasm_$GOOS_$GOARCH.h",
+ "zaexperiment.h",
"zsys_$GOOS_$GOARCH.s",
"zgoarch_$GOARCH.go",
"zgoos_$GOOS.go",
- "zruntime_defs_$GOOS_$GOARCH.go",
"zversion.go",
}},
};
@@ -639,12 +613,10 @@ static struct {
{"anames6.c", mkanames},
{"anames8.c", mkanames},
{"anames9.c", mkanames},
- {"zasm_", mkzasm},
{"zdefaultcc.go", mkzdefaultcc},
{"zsys_", mkzsys},
{"zgoarch_", mkzgoarch},
{"zgoos_", mkzgoos},
- {"zruntime_defs_", mkzruntimedefs},
{"zversion.go", mkzversion},
{"zaexperiment.h", mkzexperiment},
@@ -659,7 +631,7 @@ install(char *dir)
{
char *name, *p, *elem, *prefix, *exe;
bool islib, ispkg, isgo, stale, ispackcmd;
- Buf b, b1, path, final_path, final_name;
+ Buf b, b1, path, final_path, final_name, archive;
Vec compile, files, link, go, missing, clean, lib, extra;
Time ttarg, t;
int i, j, k, n, doclean, targ;
@@ -676,6 +648,7 @@ install(char *dir)
binit(&path);
binit(&final_path);
binit(&final_name);
+ binit(&archive);
vinit(&compile);
vinit(&files);
vinit(&link);
@@ -719,7 +692,7 @@ install(char *dir)
splitfields(&ldargs, bstr(&b));
}
- islib = hasprefix(dir, "lib") || streq(dir, "cmd/cc") || streq(dir, "cmd/gc");
+ islib = hasprefix(dir, "lib") || streq(dir, "cmd/gc");
ispkg = !islib && !hasprefix(dir, "cmd/");
isgo = ispkg || streq(dir, "cmd/go") || streq(dir, "cmd/cgo");
@@ -898,17 +871,6 @@ install(char *dir)
// For package runtime, copy some files into the work space.
if(streq(dir, "runtime")) {
- copyfile(bpathf(&b, "%s/arch_GOARCH.h", workdir),
- bpathf(&b1, "%s/arch_%s.h", bstr(&path), goarch), 0);
- copyfile(bpathf(&b, "%s/defs_GOOS_GOARCH.h", workdir),
- bpathf(&b1, "%s/defs_%s_%s.h", bstr(&path), goos, goarch), 0);
- p = bpathf(&b1, "%s/signal_%s_%s.h", bstr(&path), goos, goarch);
- if(isfile(p))
- copyfile(bpathf(&b, "%s/signal_GOOS_GOARCH.h", workdir), p, 0);
- copyfile(bpathf(&b, "%s/os_GOOS.h", workdir),
- bpathf(&b1, "%s/os_%s.h", bstr(&path), goos), 0);
- copyfile(bpathf(&b, "%s/signals_GOOS.h", workdir),
- bpathf(&b1, "%s/signals_%s.h", bstr(&path), goos), 0);
copyfile(bpathf(&b, "%s/pkg/%s_%s/textflag.h", goroot, goos, goarch),
bpathf(&b1, "%s/src/cmd/ld/textflag.h", goroot), 0);
copyfile(bpathf(&b, "%s/pkg/%s_%s/funcdata.h", goroot, goos, goarch),
@@ -942,14 +904,6 @@ install(char *dir)
built:;
}
- // One more copy for package runtime.
- // The last batch was required for the generators.
- // This one is generated.
- if(streq(dir, "runtime")) {
- copyfile(bpathf(&b, "%s/zasm_GOOS_GOARCH.h", workdir),
- bpathf(&b1, "%s/zasm_%s_%s.h", bstr(&path), goos, goarch), 0);
- }
-
if((!streq(goos, gohostos) || !streq(goarch, gohostarch)) && isgo) {
// We've generated the right files; the go command can do the build.
if(vflag > 1)
@@ -957,6 +911,42 @@ install(char *dir)
goto nobuild;
}
+ if(isgo) {
+ // The next loop will compile individual non-Go files.
+ // Hand the Go files to the compiler en masse.
+ // For package runtime, this writes go_asm.h, which
+ // the assembly files will need.
+ vreset(&compile);
+ vadd(&compile, bpathf(&b, "%s/%sg", tooldir, gochar));
+
+ bpathf(&b, "%s/_go_.a", workdir);
+ vadd(&compile, "-pack");
+ vadd(&compile, "-o");
+ vadd(&compile, bstr(&b));
+ vadd(&clean, bstr(&b));
+ if(!ispackcmd)
+ vadd(&link, bstr(&b));
+ else
+ bwriteb(&archive, &b);
+
+ vadd(&compile, "-p");
+ if(hasprefix(dir, "cmd/"))
+ vadd(&compile, "main");
+ else
+ vadd(&compile, dir);
+
+ if(streq(dir, "runtime")) {
+ vadd(&compile, "-+");
+ vadd(&compile, "-asmhdr");
+ bpathf(&b1, "%s/go_asm.h", workdir);
+ vadd(&compile, bstr(&b1));
+ }
+
+ vcopy(&compile, go.p, go.len);
+
+ runv(nil, bstr(&path), CheckExit, &compile);
+ }
+
// Compile the files.
for(i=0; i<files.len; i++) {
if(!hassuffix(files.p[i], ".c") && !hassuffix(files.p[i], ".s"))
@@ -1070,38 +1060,10 @@ install(char *dir)
}
bgwait();
- if(isgo) {
- // The last loop was compiling individual files.
- // Hand the Go files to the compiler en masse.
- vreset(&compile);
- vadd(&compile, bpathf(&b, "%s/%sg", tooldir, gochar));
-
- bpathf(&b, "%s/_go_.a", workdir);
- vadd(&compile, "-pack");
- vadd(&compile, "-o");
- vadd(&compile, bstr(&b));
- vadd(&clean, bstr(&b));
- if(!ispackcmd)
- vadd(&link, bstr(&b));
-
- vadd(&compile, "-p");
- if(hasprefix(dir, "pkg/"))
- vadd(&compile, dir+4);
- else
- vadd(&compile, "main");
-
- if(streq(dir, "runtime"))
- vadd(&compile, "-+");
-
- vcopy(&compile, go.p, go.len);
-
- runv(nil, bstr(&path), CheckExit, &compile);
-
- if(ispackcmd) {
- xremove(link.p[targ]);
- dopack(link.p[targ], bstr(&b), &link.p[targ+1], link.len - (targ+1));
- goto nobuild;
- }
+ if(isgo && ispackcmd) {
+ xremove(link.p[targ]);
+ dopack(link.p[targ], bstr(&archive), &link.p[targ+1], link.len - (targ+1));
+ goto nobuild;
}
if(!islib && !isgo) {
@@ -1115,17 +1077,7 @@ install(char *dir)
xremove(link.p[targ]);
runv(nil, nil, CheckExit, &link);
-
nobuild:
- // In package runtime, we install runtime.h and cgocall.h too,
- // for use by cgo compilation.
- if(streq(dir, "runtime")) {
- copyfile(bpathf(&b, "%s/pkg/%s_%s/cgocall.h", goroot, goos, goarch),
- bpathf(&b1, "%s/src/runtime/cgocall.h", goroot), 0);
- copyfile(bpathf(&b, "%s/pkg/%s_%s/runtime.h", goroot, goos, goarch),
- bpathf(&b1, "%s/src/runtime/runtime.h", goroot), 0);
- }
-
out:
for(i=0; i<clean.len; i++)
@@ -1134,6 +1086,7 @@ out:
bfree(&b);
bfree(&b1);
bfree(&path);
+ bfree(&archive);
vfree(&compile);
vfree(&files);
vfree(&link);
@@ -1321,11 +1274,9 @@ static char *buildorder[] = {
"libbio",
"liblink",
- "cmd/cc", // must be before c
"cmd/gc", // must be before g
- "cmd/%sl", // must be before a, c, g
+ "cmd/%sl", // must be before a, g
"cmd/%sa",
- "cmd/%sc",
"cmd/%sg",
// The dependency order here was copied from a buildscript
@@ -1382,22 +1333,17 @@ static char *buildorder[] = {
static char *cleantab[] = {
// Commands and C libraries.
"cmd/5a",
- "cmd/5c",
"cmd/5g",
"cmd/5l",
"cmd/6a",
- "cmd/6c",
"cmd/6g",
"cmd/6l",
"cmd/8a",
- "cmd/8c",
"cmd/8g",
"cmd/8l",
"cmd/9a",
- "cmd/9c",
"cmd/9g",
"cmd/9l",
- "cmd/cc",
"cmd/gc",
"cmd/go",
"lib9",
diff --git a/src/cmd/dist/buildruntime.c b/src/cmd/dist/buildruntime.c
index d22e09955..e561937fb 100644
--- a/src/cmd/dist/buildruntime.c
+++ b/src/cmd/dist/buildruntime.c
@@ -127,174 +127,8 @@ mkzgoos(char *dir, char *file)
bfree(&out);
}
-static struct {
- char *goarch;
- char *goos;
- char *hdr;
-} zasmhdr[] = {
- {"386", "",
- "#define get_tls(r) MOVL TLS, r\n"
- "#define g(r) 0(r)(TLS*1)\n"
- },
- {"amd64p32", "",
- "#define get_tls(r) MOVL TLS, r\n"
- "#define g(r) 0(r)(TLS*1)\n"
- },
- {"amd64", "",
- "#define get_tls(r) MOVQ TLS, r\n"
- "#define g(r) 0(r)(TLS*1)\n"
- },
-
- {"arm", "",
- "#define LR R14\n"
- },
-
- {"power64", "",
- "#define g R30\n"
- },
- {"power64le", "",
- "#define g R30\n"
- },
-};
-
#define MAXWINCB 2000 /* maximum number of windows callbacks allowed */
-// mkzasm writes zasm_$GOOS_$GOARCH.h,
-// which contains struct offsets for use by
-// assembly files. It also writes a copy to the work space
-// under the name zasm_GOOS_GOARCH.h (no expansion).
-//
-void
-mkzasm(char *dir, char *file)
-{
- int i, n;
- char *aggr, *p;
- Buf in, b, b1, out, exp;
- Vec argv, lines, fields;
-
- binit(&in);
- binit(&b);
- binit(&b1);
- binit(&out);
- binit(&exp);
- vinit(&argv);
- vinit(&lines);
- vinit(&fields);
-
- bwritestr(&out, "// auto generated by go tool dist\n\n");
- if(streq(goos, "linux")) {
- bwritestr(&out, "// +build !android\n\n");
- }
-
- for(i=0; i<nelem(zasmhdr); i++) {
- if(hasprefix(goarch, zasmhdr[i].goarch) && hasprefix(goos, zasmhdr[i].goos)) {
- bwritestr(&out, zasmhdr[i].hdr);
- goto ok;
- }
- }
- fatal("unknown $GOOS/$GOARCH in mkzasm");
-ok:
-
- copyfile(bpathf(&b, "%s/pkg/%s_%s/textflag.h", goroot, goos, goarch),
- bpathf(&b1, "%s/src/cmd/ld/textflag.h", goroot), 0);
-
- // Run 6c -D GOOS_goos -D GOARCH_goarch -I workdir -a -n -o workdir/proc.acid proc.c
- // to get acid [sic] output. Run once without the -a -o workdir/proc.acid in order to
- // report compilation failures (the -o redirects all messages, unfortunately).
- vreset(&argv);
- vadd(&argv, bpathf(&b, "%s/%sc", tooldir, gochar));
- vadd(&argv, "-D");
- vadd(&argv, bprintf(&b, "GOOS_%s", goos));
- vadd(&argv, "-D");
- vadd(&argv, bprintf(&b, "GOARCH_%s", goarch));
- vadd(&argv, "-I");
- vadd(&argv, bprintf(&b, "%s", workdir));
- vadd(&argv, "-I");
- vadd(&argv, bprintf(&b, "%s/pkg/%s_%s", goroot, goos, goarch));
- vadd(&argv, "-n");
- vadd(&argv, "-a");
- vadd(&argv, "-o");
- vadd(&argv, bpathf(&b, "%s/proc.acid", workdir));
- vadd(&argv, "proc.c");
- runv(nil, dir, CheckExit, &argv);
- readfile(&in, bpathf(&b, "%s/proc.acid", workdir));
-
- // Convert input like
- // aggr G
- // {
- // Gobuf 24 sched;
- // 'Y' 48 stack0;
- // }
- // StackMin = 128;
- // into output like
- // #define g_sched 24
- // #define g_stack0 48
- // #define const_StackMin 128
- aggr = nil;
- splitlines(&lines, bstr(&in));
- for(i=0; i<lines.len; i++) {
- splitfields(&fields, lines.p[i]);
- if(fields.len == 2 && streq(fields.p[0], "aggr")) {
- if(streq(fields.p[1], "G"))
- aggr = "g";
- else if(streq(fields.p[1], "M"))
- aggr = "m";
- else if(streq(fields.p[1], "P"))
- aggr = "p";
- else if(streq(fields.p[1], "Gobuf"))
- aggr = "gobuf";
- else if(streq(fields.p[1], "LibCall"))
- aggr = "libcall";
- else if(streq(fields.p[1], "WinCallbackContext"))
- aggr = "cbctxt";
- else if(streq(fields.p[1], "SEH"))
- aggr = "seh";
- else if(streq(fields.p[1], "Alg"))
- aggr = "alg";
- else if(streq(fields.p[1], "Panic"))
- aggr = "panic";
- else if(streq(fields.p[1], "Stack"))
- aggr = "stack";
- }
- if(hasprefix(lines.p[i], "}"))
- aggr = nil;
- if(aggr && hasprefix(lines.p[i], "\t") && fields.len >= 2) {
- n = fields.len;
- p = fields.p[n-1];
- if(p[xstrlen(p)-1] == ';')
- p[xstrlen(p)-1] = '\0';
- bwritestr(&out, bprintf(&b, "#define %s_%s %s\n", aggr, fields.p[n-1], fields.p[n-2]));
- }
- if(fields.len == 3 && streq(fields.p[1], "=")) { // generated from enumerated constants
- p = fields.p[2];
- if(p[xstrlen(p)-1] == ';')
- p[xstrlen(p)-1] = '\0';
- bwritestr(&out, bprintf(&b, "#define const_%s %s\n", fields.p[0], p));
- }
- }
-
- // Some #defines that are used for .c files.
- if(streq(goos, "windows")) {
- bwritestr(&out, bprintf(&b, "#define cb_max %d\n", MAXWINCB));
- }
-
- xgetenv(&exp, "GOEXPERIMENT");
- bwritestr(&out, bprintf(&b, "#define GOEXPERIMENT \"%s\"\n", bstr(&exp)));
-
- // Write both to file and to workdir/zasm_GOOS_GOARCH.h.
- writefile(&out, file, 0);
- writefile(&out, bprintf(&b, "%s/zasm_GOOS_GOARCH.h", workdir), 0);
-
- bfree(&in);
- bfree(&b);
- bfree(&b1);
- bfree(&out);
- bfree(&exp);
- vfree(&argv);
- vfree(&lines);
- vfree(&fields);
-}
-
// mkzsys writes zsys_$GOOS_$GOARCH.s,
// which contains arch or os specific asm code.
//
@@ -333,143 +167,3 @@ mkzsys(char *dir, char *file)
bfree(&out);
}
-
-static char *runtimedefs[] = {
- "defs.c",
- "malloc.c",
- "mcache.c",
- "mgc0.c",
- "proc.c",
- "parfor.c",
- "stack.c",
-};
-
-// mkzruntimedefs writes zruntime_defs_$GOOS_$GOARCH.h,
-// which contains Go struct definitions equivalent to the C ones.
-// Mostly we just write the output of 6c -q to the file.
-// However, we run it on multiple files, so we have to delete
-// the duplicated definitions, and we don't care about the funcs,
-// so we delete those too.
-//
-void
-mkzruntimedefs(char *dir, char *file)
-{
- int i, skip;
- char *p;
- Buf in, b, b1, out;
- Vec argv, lines, fields, seen;
-
- binit(&in);
- binit(&b);
- binit(&b1);
- binit(&out);
- vinit(&argv);
- vinit(&lines);
- vinit(&fields);
- vinit(&seen);
-
- bwritestr(&out, "// auto generated by go tool dist\n"
- "\n");
-
- if(streq(goos, "linux")) {
- bwritestr(&out, "// +build !android\n\n");
- }
-
- bwritestr(&out,
- "package runtime\n"
- "import \"unsafe\"\n"
- "var _ unsafe.Pointer\n"
- "\n"
- );
-
- // Do not emit definitions for these.
- vadd(&seen, "true");
- vadd(&seen, "false");
- vadd(&seen, "raceenabled");
- vadd(&seen, "allgs");
-
- // Run 6c -D GOOS_goos -D GOARCH_goarch -I workdir -q -n -o workdir/runtimedefs
- // on each of the runtimedefs C files.
- vadd(&argv, bpathf(&b, "%s/%sc", tooldir, gochar));
- vadd(&argv, "-D");
- vadd(&argv, bprintf(&b, "GOOS_%s", goos));
- vadd(&argv, "-D");
- vadd(&argv, bprintf(&b, "GOARCH_%s", goarch));
- vadd(&argv, "-I");
- vadd(&argv, bprintf(&b, "%s", workdir));
- vadd(&argv, "-I");
- vadd(&argv, bprintf(&b, "%s/pkg/%s_%s", goroot, goos, goarch));
- vadd(&argv, "-q");
- vadd(&argv, "-n");
- vadd(&argv, "-o");
- vadd(&argv, bpathf(&b, "%s/runtimedefs", workdir));
- vadd(&argv, "");
- p = argv.p[argv.len-1];
- for(i=0; i<nelem(runtimedefs); i++) {
- argv.p[argv.len-1] = runtimedefs[i];
- runv(nil, dir, CheckExit, &argv);
- readfile(&b, bpathf(&b1, "%s/runtimedefs", workdir));
- bwriteb(&in, &b);
- }
- argv.p[argv.len-1] = p;
-
- // Process the aggregate output.
- skip = 0;
- splitlines(&lines, bstr(&in));
- for(i=0; i<lines.len; i++) {
- p = lines.p[i];
- // Drop comment and func lines.
- if(hasprefix(p, "//") || hasprefix(p, "func"))
- continue;
-
- // Note beginning of type or var decl, which can be multiline.
- // Remove duplicates. The linear check of seen here makes the
- // whole processing quadratic in aggregate, but there are only
- // about 100 declarations, so this is okay (and simple).
- if(hasprefix(p, "type ") || hasprefix(p, "var ") || hasprefix(p, "const ")) {
- splitfields(&fields, p);
- if(fields.len < 2)
- continue;
- if(find(fields.p[1], seen.p, seen.len) >= 0) {
- if(streq(fields.p[fields.len-1], "{"))
- skip = 1; // skip until }
- continue;
- }
- vadd(&seen, fields.p[1]);
- }
-
- // Const lines are printed in original case (usually upper). Add a leading _ as needed.
- if(hasprefix(p, "const ")) {
- if('A' <= p[6] && p[6] <= 'Z')
- bwritestr(&out, "const _");
- else
- bwritestr(&out, "const ");
- bwritestr(&out, p+6);
- continue;
- }
-
- if(skip) {
- if(hasprefix(p, "}"))
- skip = 0;
- continue;
- }
-
- bwritestr(&out, p);
- }
-
- // Some windows specific const.
- if(streq(goos, "windows")) {
- bwritestr(&out, bprintf(&b, "const cb_max = %d\n", MAXWINCB));
- }
-
- writefile(&out, file, 0);
-
- bfree(&in);
- bfree(&b);
- bfree(&b1);
- bfree(&out);
- vfree(&argv);
- vfree(&lines);
- vfree(&fields);
- vfree(&seen);
-}
diff --git a/src/cmd/gc/export.c b/src/cmd/gc/export.c
index da5984ceb..aeee55236 100644
--- a/src/cmd/gc/export.c
+++ b/src/cmd/gc/export.c
@@ -7,6 +7,8 @@
#include "go.h"
#include "y.tab.h"
+static NodeList *asmlist;
+
static void dumpexporttype(Type *t);
// Mark n's symbol as exported
@@ -68,6 +70,11 @@ autoexport(Node *n, int ctxt)
// -A is for cmd/gc/mkbuiltin script, so export everything
if(debug['A'] || exportname(n->sym->name) || initname(n->sym->name))
exportsym(n);
+ if(asmhdr && n->sym->pkg == localpkg && !(n->sym->flags & SymAsm)) {
+ n->sym->flags |= SymAsm;
+ asmlist = list(asmlist, n);
+ }
+
}
static void
@@ -519,3 +526,37 @@ importtype(Type *pt, Type *t)
if(debug['E'])
print("import type %T %lT\n", pt, t);
}
+
+void
+dumpasmhdr(void)
+{
+ Biobuf *b;
+ NodeList *l;
+ Node *n;
+ Type *t;
+
+ b = Bopen(asmhdr, OWRITE);
+ if(b == nil)
+ fatal("open %s: %r", asmhdr);
+ Bprint(b, "// generated by %cg -asmhdr from package %s\n\n", thechar, localpkg->name);
+ for(l=asmlist; l; l=l->next) {
+ n = l->n;
+ if(isblanksym(n->sym))
+ continue;
+ switch(n->op) {
+ case OLITERAL:
+ Bprint(b, "#define const_%s %#V\n", n->sym->name, &n->val);
+ break;
+ case OTYPE:
+ t = n->type;
+ if(t->etype != TSTRUCT || t->map != T || t->funarg)
+ break;
+ for(t=t->type; t != T; t=t->down)
+ if(!isblanksym(t->sym))
+ Bprint(b, "#define %s_%s %d\n", n->sym->name, t->sym->name, (int)t->width);
+ break;
+ }
+ }
+
+ Bterm(b);
+}
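
The new dumpasmhdr above replaces the old zasm_GOOS_GOARCH.h machinery: when the compiler is run with -asmhdr, it emits "#define const_<name> <value>" for package-level constants and "#define <Type>_<field> <offset>" for plain struct types. A minimal sketch of the correspondence, with an invented package and assumed offsets for a 64-bit target (the compiler derives the real offsets from t->width):

package example

// Compiling this package with -asmhdr go_asm.h would produce header lines
// of the form shown in the comments.  Values and offsets are assumptions.

const _StackGuard = 512 // -> #define const__StackGuard 512

type gobuf struct {
	sp uintptr // -> #define gobuf_sp 0
	pc uintptr // -> #define gobuf_pc 8
	g  uintptr // -> #define gobuf_g 16
}
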
diff --git a/src/cmd/gc/go.h b/src/cmd/gc/go.h
index c695c5bf3..5236305f8 100644
--- a/src/cmd/gc/go.h
+++ b/src/cmd/gc/go.h
@@ -382,6 +382,7 @@ enum
SymExported = 1<<2, // already written out by export
SymUniq = 1<<3,
SymSiggen = 1<<4,
+ SymAsm = 1<<5,
};
struct Sym
@@ -393,6 +394,7 @@ struct Sym
int32 npkg; // number of imported packages with this name
uint32 uniqgen;
Pkg* importdef; // where imported definition was found
+ char* linkname; // link name
// saved and restored by dcopy
Pkg* pkg;
@@ -860,6 +862,8 @@ EXTERN int32 lexlineno;
EXTERN int32 lineno;
EXTERN int32 prevlineno;
+EXTERN Fmt pragcgobuf;
+
EXTERN char* infile;
EXTERN char* outfile;
EXTERN Biobuf* bout;
@@ -890,6 +894,7 @@ EXTERN Pkg* typelinkpkg; // fake package for runtime type info (data)
EXTERN Pkg* weaktypepkg; // weak references to runtime type info
EXTERN Pkg* unsafepkg; // package unsafe
EXTERN Pkg* trackpkg; // fake package for field tracking
+EXTERN Pkg* rawpkg; // fake package for raw symbol names
EXTERN Pkg* phash[128];
EXTERN int tptr; // either TPTR32 or TPTR64
extern char* runtimeimport;
@@ -897,6 +902,7 @@ extern char* unsafeimport;
EXTERN char* myimportpath;
EXTERN Idir* idirs;
EXTERN char* localimport;
+EXTERN char* asmhdr;
EXTERN Type* types[NTYPE];
EXTERN Type* idealstring;
@@ -1147,6 +1153,7 @@ void escapes(NodeList*);
*/
void autoexport(Node *n, int ctxt);
void dumpexport(void);
+void dumpasmhdr(void);
int exportname(char *s);
void exportsym(Node *n);
void importconst(Sym *s, Type *t, Node *n);
diff --git a/src/cmd/gc/lex.c b/src/cmd/gc/lex.c
index 523ba37aa..2bd7adfb6 100644
--- a/src/cmd/gc/lex.c
+++ b/src/cmd/gc/lex.c
@@ -17,6 +17,8 @@ extern int yychar;
int yyprev;
int yylast;
+static int imported_unsafe;
+
static void lexinit(void);
static void lexinit1(void);
static void lexfini(void);
@@ -271,6 +273,9 @@ main(int argc, char *argv[])
flag_largemodel = 1;
setexp();
+
+ fmtstrinit(&pragcgobuf);
+ quotefmtinstall();
outfile = nil;
flagcount("+", "compiling runtime", &compiling_runtime);
@@ -289,6 +294,7 @@ main(int argc, char *argv[])
flagcount("S", "print assembly listing", &debug['S']);
flagfn0("V", "print compiler version", doversion);
flagcount("W", "debug parse tree after type checking", &debug['W']);
+ flagstr("asmhdr", "file: write assembly header to named file", &asmhdr);
flagcount("complete", "compiling complete package (no C or assembly)", &pure_go);
flagstr("d", "list: print debug information about items in list", &debugstr);
flagcount("e", "no limit on number of errors reported", &debug['e']);
@@ -403,6 +409,8 @@ main(int argc, char *argv[])
block = 1;
iota = -1000000;
+
+ imported_unsafe = 0;
yyparse();
if(nsyntaxerrors != 0)
@@ -509,6 +517,9 @@ main(int argc, char *argv[])
errorexit();
dumpobj();
+
+ if(asmhdr)
+ dumpasmhdr();
if(nerrors+nsavederrors)
errorexit();
@@ -724,6 +735,7 @@ importfile(Val *f, int line)
}
importpkg = mkpkg(f->u.sval);
cannedimports("unsafe.6", unsafeimport);
+ imported_unsafe = 1;
return;
}
@@ -1501,6 +1513,20 @@ caseout:
return LLITERAL;
}
+static void pragcgo(char*);
+
+static int
+more(char **pp)
+{
+ char *p;
+
+ p = *pp;
+ while(yy_isspace(*p))
+ p++;
+ *pp = p;
+ return *p != '\0';
+}
+
/*
* read and interpret syntax that looks like
* //line parse.y:15
@@ -1583,9 +1609,39 @@ go:
*cp++ = c;
}
*cp = 0;
+
+ if(strncmp(lexbuf, "go:cgo_", 7) == 0)
+ pragcgo(lexbuf);
+
ep = strchr(lexbuf, ' ');
if(ep != nil)
*ep = 0;
+
+ if(strcmp(lexbuf, "go:linkname") == 0) {
+ if(!imported_unsafe)
+ yyerror("//go:linkname only allowed in Go files that import \"unsafe\"");
+ if(ep == nil) {
+ yyerror("usage: //go:linkname localname linkname");
+ goto out;
+ }
+ cp = ep+1;
+ while(yy_isspace(*cp))
+ cp++;
+ ep = strchr(cp, ' ');
+ if(ep == nil) {
+ yyerror("usage: //go:linkname localname linkname");
+ goto out;
+ }
+ *ep++ = 0;
+ while(yy_isspace(*ep))
+ ep++;
+ if(*ep == 0) {
+ yyerror("usage: //go:linkname localname linkname");
+ goto out;
+ }
+ lookup(cp)->linkname = strdup(ep);
+ goto out;
+ }
if(strcmp(lexbuf, "go:nointerface") == 0 && fieldtrack_enabled) {
nointerface = 1;
@@ -1604,6 +1660,150 @@ out:
return c;
}
+static char*
+getimpsym(char **pp)
+{
+ char *p, *start;
+
+ more(pp); // skip spaces
+
+ p = *pp;
+ if(*p == '\0' || *p == '"')
+ return nil;
+
+ start = p;
+ while(*p != '\0' && !yy_isspace(*p) && *p != '"')
+ p++;
+ if(*p != '\0')
+ *p++ = '\0';
+
+ *pp = p;
+ return start;
+}
+
+static char*
+getquoted(char **pp)
+{
+ char *p, *start;
+
+ more(pp); // skip spaces
+
+ p = *pp;
+ if(*p != '"')
+ return nil;
+ p++;
+
+ start = p;
+ while(*p != '"') {
+ if(*p == '\0')
+ return nil;
+ p++;
+ }
+ *p++ = '\0';
+ *pp = p;
+ return start;
+}
+
+// Copied nearly verbatim from the C compiler's #pragma parser.
+// TODO: Rewrite more cleanly once the compiler is written in Go.
+static void
+pragcgo(char *text)
+{
+ char *local, *remote, *p, *q, *verb;
+
+ for(q=text; *q != '\0' && *q != ' '; q++)
+ ;
+ if(*q == ' ')
+ *q++ = '\0';
+
+ verb = text+3; // skip "go:"
+
+ if(strcmp(verb, "cgo_dynamic_linker") == 0 || strcmp(verb, "dynlinker") == 0) {
+ p = getquoted(&q);
+ if(p == nil)
+ goto err1;
+ fmtprint(&pragcgobuf, "cgo_dynamic_linker %q\n", p);
+ goto out;
+
+ err1:
+ yyerror("usage: //go:cgo_dynamic_linker \"path\"");
+ goto out;
+ }
+
+ if(strcmp(verb, "dynexport") == 0)
+ verb = "cgo_export_dynamic";
+ if(strcmp(verb, "cgo_export_static") == 0 || strcmp(verb, "cgo_export_dynamic") == 0) {
+ local = getimpsym(&q);
+ if(local == nil)
+ goto err2;
+ if(!more(&q)) {
+ fmtprint(&pragcgobuf, "%s %q\n", verb, local);
+ goto out;
+ }
+ remote = getimpsym(&q);
+ if(remote == nil)
+ goto err2;
+ fmtprint(&pragcgobuf, "%s %q %q\n", verb, local, remote);
+ goto out;
+
+ err2:
+ yyerror("usage: //go:%s local [remote]", verb);
+ goto out;
+ }
+
+ if(strcmp(verb, "cgo_import_dynamic") == 0 || strcmp(verb, "dynimport") == 0) {
+ local = getimpsym(&q);
+ if(local == nil)
+ goto err3;
+ if(!more(&q)) {
+ fmtprint(&pragcgobuf, "cgo_import_dynamic %q\n", local);
+ goto out;
+ }
+ remote = getimpsym(&q);
+ if(remote == nil)
+ goto err3;
+ if(!more(&q)) {
+ fmtprint(&pragcgobuf, "cgo_import_dynamic %q %q\n", local, remote);
+ goto out;
+ }
+ p = getquoted(&q);
+ if(p == nil)
+ goto err3;
+ fmtprint(&pragcgobuf, "cgo_import_dynamic %q %q %q\n", local, remote, p);
+ goto out;
+
+ err3:
+ yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]");
+ goto out;
+ }
+
+ if(strcmp(verb, "cgo_import_static") == 0) {
+ local = getimpsym(&q);
+ if(local == nil || more(&q))
+ goto err4;
+ fmtprint(&pragcgobuf, "cgo_import_static %q\n", local);
+ goto out;
+
+ err4:
+ yyerror("usage: //go:cgo_import_static local");
+ goto out;
+ }
+
+ if(strcmp(verb, "cgo_ldflag") == 0) {
+ p = getquoted(&q);
+ if(p == nil)
+ goto err5;
+ fmtprint(&pragcgobuf, "cgo_ldflag %q\n", p);
+ goto out;
+
+ err5:
+ yyerror("usage: //go:cgo_ldflag \"arg\"");
+ goto out;
+ }
+
+out:;
+}
+
int32
yylex(void)
{
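
The //go:linkname handling added above accepts exactly "//go:linkname localname linkname", rejects the directive in files that do not import "unsafe", and records the link name on the symbol; linksym in obj.c then uses it in place of the usual prefix.name form. A hedged sketch of the directive, mirroring the pattern used by the new runtime/cgo.go later in this change; the names are illustrative:

package mypkg

import "unsafe"

// Give localSymbol the raw link-time name "mypkg_raw" instead of the
// default "mypkg.localSymbol".  Both names here are made up.
//go:linkname localSymbol mypkg_raw
var localSymbol unsafe.Pointer
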
diff --git a/src/cmd/gc/obj.c b/src/cmd/gc/obj.c
index b752a13ce..7e4e97854 100644
--- a/src/cmd/gc/obj.c
+++ b/src/cmd/gc/obj.c
@@ -67,6 +67,16 @@ dumpobj(void)
startobj = Boffset(bout);
Bprint(bout, "go object %s %s %s %s\n", getgoos(), getgoarch(), getgoversion(), expstring());
}
+
+ if(pragcgobuf.to > pragcgobuf.start) {
+ if(writearchive) {
+ // write empty export section; must be before cgo section
+ Bprint(bout, "\n$$\n\n$$\n\n");
+ }
+ Bprint(bout, "\n$$ // cgo\n");
+ Bprint(bout, "%s\n$$\n\n", fmtstrflush(&pragcgobuf));
+ }
+
Bprint(bout, "\n!\n");
@@ -153,6 +163,8 @@ linksym(Sym *s)
return s->lsym;
if(isblanksym(s))
s->lsym = linklookup(ctxt, "_", 0);
+ else if(s->linkname != nil)
+ s->lsym = linklookup(ctxt, s->linkname, 0);
else {
p = smprint("%s.%s", s->pkg->prefix, s->name);
s->lsym = linklookup(ctxt, p, 0);
diff --git a/src/cmd/gc/reflect.c b/src/cmd/gc/reflect.c
index 0f8802abc..4155953be 100644
--- a/src/cmd/gc/reflect.c
+++ b/src/cmd/gc/reflect.c
@@ -1318,7 +1318,7 @@ gengcmask(Type *t, uint8 gcmask[16])
{
Bvec *vec;
vlong xoffset, nptr, i, j;
- int half, mw;
+ int half;
uint8 bits, *pos;
memset(gcmask, 0, 16);
@@ -1335,7 +1335,6 @@ gengcmask(Type *t, uint8 gcmask[16])
pos = (uint8*)gcmask;
nptr = (t->width+widthptr-1)/widthptr;
half = 0;
- mw = 0;
// If number of words is odd, repeat the mask.
// This makes simpler handling of arrays in runtime.
for(j=0; j<=(nptr%2); j++) {
@@ -1344,9 +1343,8 @@ gengcmask(Type *t, uint8 gcmask[16])
// Some fake types (e.g. Hmap) have missing fields.
// twobitwalktype1 generates BitsDead for those holes,
// replace BitsDead with BitsScalar.
- if(!mw && bits == BitsDead)
+ if(bits == BitsDead)
bits = BitsScalar;
- mw = !mw && bits == BitsMultiWord;
bits <<= 2;
if(half)
bits <<= 4;
diff --git a/src/cmd/gc/subr.c b/src/cmd/gc/subr.c
index c3bc5af3b..5e369b695 100644
--- a/src/cmd/gc/subr.c
+++ b/src/cmd/gc/subr.c
@@ -3802,39 +3802,25 @@ checknil(Node *x, NodeList **init)
/*
* Can this type be stored directly in an interface word?
+ * Yes, if the representation is a single pointer.
*/
int
isdirectiface(Type *t)
{
- // Setting IfacePointerOnly = 1 changes the
- // interface representation so that the data word
- // in an interface value must always be a pointer.
- // Setting it to 0 uses the original representation,
- // where the data word can hold a pointer or any
- // non-pointer value no bigger than a pointer.
- enum {
- IfacePointerOnly = 1,
- };
-
- if(IfacePointerOnly) {
- switch(t->etype) {
- case TPTR32:
- case TPTR64:
- case TCHAN:
- case TMAP:
- case TFUNC:
- case TUNSAFEPTR:
- return 1;
- case TARRAY:
- // Array of 1 direct iface type can be direct.
- return t->bound == 1 && isdirectiface(t->type);
- case TSTRUCT:
- // Struct with 1 field of direct iface type can be direct.
- return t->type != T && t->type->down == T && isdirectiface(t->type->type);
- }
- return 0;
+ switch(t->etype) {
+ case TPTR32:
+ case TPTR64:
+ case TCHAN:
+ case TMAP:
+ case TFUNC:
+ case TUNSAFEPTR:
+ return 1;
+ case TARRAY:
+ // Array of 1 direct iface type can be direct.
+ return t->bound == 1 && isdirectiface(t->type);
+ case TSTRUCT:
+ // Struct with 1 field of direct iface type can be direct.
+ return t->type != T && t->type->down == T && isdirectiface(t->type->type);
}
-
- dowidth(t);
- return t->width <= widthptr;
+ return 0;
}
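
The rewritten isdirectiface above makes the pointer-only interface layout unconditional: only types whose representation is a single pointer are stored directly in the interface data word. For reference, the cases of the new switch correspond to Go types like these (names are illustrative; the decision itself is internal to the compiler):

package example

import "unsafe"

type (
	p  *int                // TPTR32/TPTR64: direct
	c  chan int            // TCHAN: direct
	m  map[int]int         // TMAP: direct
	f  func()              // TFUNC: direct
	u  unsafe.Pointer      // TUNSAFEPTR: direct
	a1 [1]*int             // TARRAY with one direct element: direct
	s1 struct{ x *int }    // TSTRUCT with one direct field: direct

	a2 [2]*int             // two elements: not direct
	s2 struct{ x, y *int } // two fields: not direct
	i  int                 // non-pointer representation: not direct
)
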
diff --git a/src/cmd/go/build.go b/src/cmd/go/build.go
index 1dd4314da..5dcaa04a1 100644
--- a/src/cmd/go/build.go
+++ b/src/cmd/go/build.go
@@ -813,7 +813,7 @@ func (b *builder) build(a *action) (err error) {
}
if a.p.Standard && a.p.ImportPath == "runtime" && buildContext.Compiler == "gc" &&
- !hasString(a.p.HFiles, "zasm_"+buildContext.GOOS+"_"+buildContext.GOARCH+".h") {
+ !hasString(a.p.SFiles, "zsys_"+buildContext.GOOS+"_"+buildContext.GOARCH+".s") {
return fmt.Errorf("%s/%s must be bootstrapped using make%v", buildContext.GOOS, buildContext.GOARCH, defaultSuffix())
}
@@ -941,7 +941,7 @@ func (b *builder) build(a *action) (err error) {
inc := b.includeArgs("-I", a.deps)
// Compile Go.
- ofile, out, err := buildToolchain.gc(b, a.p, a.objpkg, obj, inc, gofiles)
+ ofile, out, err := buildToolchain.gc(b, a.p, a.objpkg, obj, len(sfiles) > 0, inc, gofiles)
if len(out) > 0 {
b.showOutput(a.p.Dir, a.p.ImportPath, b.processOutput(out))
if err != nil {
@@ -1550,7 +1550,7 @@ type toolchain interface {
// gc runs the compiler in a specific directory on a set of files
// and returns the name of the generated output file.
// The compiler runs in the directory dir.
- gc(b *builder, p *Package, archive, obj string, importArgs []string, gofiles []string) (ofile string, out []byte, err error)
+ gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error)
// cc runs the toolchain's C compiler in a directory on a C file
// to produce an output file.
cc(b *builder, p *Package, objdir, ofile, cfile string) error
@@ -1587,7 +1587,7 @@ func (noToolchain) linker() string {
return ""
}
-func (noToolchain) gc(b *builder, p *Package, archive, obj string, importArgs []string, gofiles []string) (ofile string, out []byte, err error) {
+func (noToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) {
return "", nil, noCompiler()
}
@@ -1623,7 +1623,7 @@ func (gcToolchain) linker() string {
return tool(archChar + "l")
}
-func (gcToolchain) gc(b *builder, p *Package, archive, obj string, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
+func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
if archive != "" {
ofile = archive
} else {
@@ -1660,6 +1660,9 @@ func (gcToolchain) gc(b *builder, p *Package, archive, obj string, importArgs []
if ofile == archive {
args = append(args, "-pack")
}
+ if asmhdr {
+ args = append(args, "-asmhdr", obj+"go_asm.h")
+ }
for _, f := range gofiles {
args = append(args, mkAbs(p.Dir, f))
}
@@ -1824,18 +1827,7 @@ func (gcToolchain) ld(b *builder, p *Package, out string, allactions []*action,
}
func (gcToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
- inc := filepath.Join(goroot, "pkg", fmt.Sprintf("%s_%s", goos, goarch))
- cfile = mkAbs(p.Dir, cfile)
- warn := []string{"-w"}
- if p.usesSwig() {
- // When using SWIG, this compiler is only used to
- // compile the C files generated by SWIG.
- // We don't want warnings.
- // See issue 9065 for details.
- warn = nil
- }
- args := stringList(tool(archChar+"c"), "-F", "-V", warn, "-trimpath", b.work, "-I", objdir, "-I", inc, "-o", ofile, buildCcflags, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, cfile)
- return b.run(p.Dir, p.ImportPath, nil, args)
+ return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(p.Dir, cfile))
}
// The Gccgo toolchain.
@@ -1859,7 +1851,7 @@ func (gccgoToolchain) linker() string {
return gccgoBin
}
-func (gccgoToolchain) gc(b *builder, p *Package, archive, obj string, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
+func (gccgoToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
out := "_go_.o"
ofile = obj + out
gcargs := []string{"-g"}
@@ -2225,11 +2217,14 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, gccfi
outGo = append(outGo, gofiles...)
// cc _cgo_defun.c
- defunObj := obj + "_cgo_defun." + objExt
- if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil {
- return nil, nil, err
+ _, gccgo := buildToolchain.(gccgoToolchain)
+ if gccgo {
+ defunObj := obj + "_cgo_defun." + objExt
+ if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, defunObj)
}
- outObj = append(outObj, defunObj)
// gcc
var linkobj []string
@@ -2343,20 +2338,15 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, gccfi
}
// cgo -dynimport
- importC := obj + "_cgo_import.c"
+ importGo := obj + "_cgo_import.go"
cgoflags = []string{}
if p.Standard && p.ImportPath == "runtime/cgo" {
cgoflags = append(cgoflags, "-dynlinker") // record path to dynamic linker
}
- if err := b.run(p.Dir, p.ImportPath, nil, cgoExe, "-objdir", obj, "-dynimport", dynobj, "-dynout", importC, cgoflags); err != nil {
- return nil, nil, err
- }
-
- // cc _cgo_import.ARCH
- importObj := obj + "_cgo_import." + objExt
- if err := buildToolchain.cc(b, p, obj, importObj, importC); err != nil {
+ if err := b.run(p.Dir, p.ImportPath, nil, cgoExe, "-objdir", obj, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags); err != nil {
return nil, nil, err
}
+ outGo = append(outGo, importGo)
ofile := obj + "_all.o"
var gccObjs, nonGccObjs []string
@@ -2390,7 +2380,7 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, gccfi
// NOTE(rsc): The importObj is a 5c/6c/8c object and on Windows
// must be processed before the gcc-generated objects.
// Put it first. http://golang.org/issue/2601
- outObj = stringList(importObj, nonGccObjs, ofile)
+ outObj = stringList(nonGccObjs, ofile)
return outGo, outObj, nil
}
@@ -2526,7 +2516,7 @@ func (b *builder) swigIntSize(obj string) (intsize string, err error) {
p := goFilesPackage(srcs)
- if _, _, e := buildToolchain.gc(b, p, "", obj, nil, srcs); e != nil {
+ if _, _, e := buildToolchain.gc(b, p, "", obj, false, nil, srcs); e != nil {
return "32", nil
}
return "64", nil
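
The go command changes above thread a new asmhdr flag through the toolchain interface: the builder passes len(sfiles) > 0, and gcToolchain.gc adds -asmhdr obj/go_asm.h only when the package actually contains assembly. A reduced sketch of that argument construction (simplified, with placeholder tool and file names, not the builder's exact code):

package example

// gcArgs sketches how the compile invocation grows an -asmhdr flag when
// the package has .s files.  "6g" and the output names are placeholders.
func gcArgs(obj string, asmhdr bool, gofiles []string) []string {
	args := []string{"6g", "-o", obj + "_go_.6", "-pack"}
	if asmhdr {
		args = append(args, "-asmhdr", obj+"go_asm.h")
	}
	return append(args, gofiles...)
}
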
diff --git a/src/liblink/asm6.c b/src/liblink/asm6.c
index 428eb9442..7971022b5 100644
--- a/src/liblink/asm6.c
+++ b/src/liblink/asm6.c
@@ -1543,9 +1543,8 @@ static vlong vaddr(Link*, Addr*, Reloc*);
static int
isextern(LSym *s)
{
- // All the Solaris dynamic imports from libc.so begin with "libc·", which
- // the compiler rewrites to "libc." by the time liblink gets it.
- return strncmp(s->name, "libc.", 5) == 0;
+ // All the Solaris dynamic imports from libc.so begin with "libc_".
+ return strncmp(s->name, "libc_", 5) == 0;
}
// single-instruction no-ops of various lengths.
diff --git a/src/liblink/objfile.c b/src/liblink/objfile.c
index 6d8694953..aa701f459 100644
--- a/src/liblink/objfile.c
+++ b/src/liblink/objfile.c
@@ -551,9 +551,10 @@ ldobjfile(Link *ctxt, Biobuf *f, char *pkg, int64 len, char *pn)
static void
readsym(Link *ctxt, Biobuf *f, char *pkg, char *pn)
{
- int i, j, c, t, v, n, size, dupok;
+ int i, j, c, t, v, n, ndata, nreloc, size, dupok;
static int ndup;
char *name;
+ uchar *data;
Reloc *r;
LSym *s, *dup, *typ;
Pcln *pc;
@@ -569,12 +570,24 @@ readsym(Link *ctxt, Biobuf *f, char *pkg, char *pn)
dupok = rdint(f);
dupok &= 1;
size = rdint(f);
+ typ = rdsym(ctxt, f, pkg);
+ rddata(f, &data, &ndata);
+ nreloc = rdint(f);
if(v != 0)
v = ctxt->version;
s = linklookup(ctxt, name, v);
dup = nil;
if(s->type != 0 && s->type != SXREF) {
+ if((t == SDATA || t == SBSS || t == SNOPTRBSS) && ndata == 0 && nreloc == 0) {
+ if(s->size < size)
+ s->size = size;
+ if(typ != nil && s->gotype == nil)
+ s->gotype = typ;
+ return;
+ }
+ if((s->type == SDATA || s->type == SBSS || s->type == SNOPTRBSS) && s->np == 0 && s->nr == 0)
+ goto overwrite;
if(s->type != SBSS && s->type != SNOPTRBSS && !dupok && !s->dupok)
sysfatal("duplicate symbol %s (types %d and %d) in %s and %s", s->name, s->type, t, s->file, pn);
if(s->np > 0) {
@@ -582,28 +595,30 @@ readsym(Link *ctxt, Biobuf *f, char *pkg, char *pn)
s = linknewsym(ctxt, ".dup", ndup++); // scratch
}
}
+overwrite:
s->file = pkg;
s->dupok = dupok;
if(t == SXREF)
sysfatal("bad sxref");
if(t == 0)
sysfatal("missing type for %s in %s", name, pn);
+ if(t == SBSS && (s->type == SRODATA || s->type == SNOPTRBSS))
+ t = s->type;
s->type = t;
if(s->size < size)
s->size = size;
- typ = rdsym(ctxt, f, pkg);
if(typ != nil) // if bss sym defined multiple times, take type from any one def
s->gotype = typ;
if(dup != nil && typ != nil)
dup->gotype = typ;
- rddata(f, &s->p, &s->np);
+ s->p = data;
+ s->np = ndata;
s->maxp = s->np;
- n = rdint(f);
- if(n > 0) {
- s->r = emallocz(n * sizeof s->r[0]);
- s->nr = n;
- s->maxr = n;
- for(i=0; i<n; i++) {
+ if(nreloc > 0) {
+ s->r = emallocz(nreloc * sizeof s->r[0]);
+ s->nr = nreloc;
+ s->maxr = nreloc;
+ for(i=0; i<nreloc; i++) {
r = &s->r[i];
r->off = rdint(f);
r->siz = rdint(f);
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 2064922f6..e05a3f9d1 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -1588,9 +1588,8 @@ func (gc *gcProg) align(a uintptr) {
// These constants must stay in sync with ../runtime/mgc0.h.
const (
- bitsScalar = 1
- bitsPointer = 2
- bitsMultiWord = 3
+ bitsScalar = 1
+ bitsPointer = 2
bitsIface = 2
bitsEface = 3
diff --git a/src/run.bash b/src/run.bash
index 91f12a174..54b209591 100755
--- a/src/run.bash
+++ b/src/run.bash
@@ -244,8 +244,8 @@ rm -f runtest
[ "$GOOS" == nacl ] ||
(
echo
-echo '# Checking API compatibility.'
-time go run $GOROOT/src/cmd/api/run.go || exit 1
+echo '# SKIPPING API CHECK UNTIL ALL SYSTEMS BUILD.'
+# time go run $GOROOT/src/cmd/api/run.go || exit 1
) || exit $?
echo
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index e9ed59503..e367bc5b2 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -314,9 +314,6 @@ const hashRandomBytes = 32
var aeskeysched [hashRandomBytes]byte
-//go:noescape
-func get_random_data(rnd *unsafe.Pointer, n *int32)
-
func init() {
if theGoos == "nacl" {
return
diff --git a/src/runtime/arch1_386.go b/src/runtime/arch1_386.go
new file mode 100644
index 000000000..7746dfbf0
--- /dev/null
+++ b/src/runtime/arch1_386.go
@@ -0,0 +1,15 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ thechar = '8'
+ _BigEndian = 0
+ _CacheLineSize = 64
+ _RuntimeGogoBytes = 64
+ _PhysPageSize = _NaCl*65536 + (1-_NaCl)*4096 // 4k normally; 64k on NaCl
+ _PCQuantum = 1
+ _Int64Align = 4
+)
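
The expression form used for _PhysPageSize above (and for _RuntimeGogoBytes in the amd64 file that follows) replaces the old #ifdef blocks in arch_386.h: with per-OS constants that are 0 or 1, flag*a + (1-flag)*b selects between two constant values at compile time. A small stand-alone illustration with an assumed flag constant:

package example

const (
	isNaCl       = 0                              // assumed 0-or-1 flag, analogous to _NaCl
	physPageSize = isNaCl*65536 + (1-isNaCl)*4096 // 65536 when isNaCl==1, 4096 otherwise
)
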
diff --git a/src/runtime/arch1_amd64.go b/src/runtime/arch1_amd64.go
new file mode 100644
index 000000000..83c9c2dc9
--- /dev/null
+++ b/src/runtime/arch1_amd64.go
@@ -0,0 +1,15 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ thechar = '6'
+ _BigEndian = 0
+ _CacheLineSize = 64
+ _RuntimeGogoBytes = 64 + (_Plan9|_Solaris|_Windows)*16
+ _PhysPageSize = 4096
+ _PCQuantum = 1
+ _Int64Align = 8
+)
diff --git a/src/runtime/arch1_arm.go b/src/runtime/arch1_arm.go
new file mode 100644
index 000000000..5cb79fd68
--- /dev/null
+++ b/src/runtime/arch1_arm.go
@@ -0,0 +1,15 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ thechar = '5'
+ _BigEndian = 0
+ _CacheLineSize = 32
+ _RuntimeGogoBytes = 60
+ _PhysPageSize = 65536*_NaCl + 4096*(1-_NaCl)
+ _PCQuantum = 4
+ _Int64Align = 4
+)
diff --git a/src/runtime/arch_386.h b/src/runtime/arch_386.h
deleted file mode 100644
index 75a5ba77f..000000000
--- a/src/runtime/arch_386.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-enum {
- thechar = '8',
- BigEndian = 0,
- CacheLineSize = 64,
- RuntimeGogoBytes = 64,
-#ifdef GOOS_nacl
- PhysPageSize = 65536,
-#else
- PhysPageSize = 4096,
-#endif
- PCQuantum = 1,
- Int64Align = 4
-};
diff --git a/src/runtime/arch_amd64.h b/src/runtime/arch_amd64.h
deleted file mode 100644
index d7b81ee90..000000000
--- a/src/runtime/arch_amd64.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-enum {
- thechar = '6',
- BigEndian = 0,
- CacheLineSize = 64,
-#ifdef GOOS_solaris
- RuntimeGogoBytes = 80,
-#else
-#ifdef GOOS_windows
- RuntimeGogoBytes = 80,
-#else
-#ifdef GOOS_plan9
- RuntimeGogoBytes = 80,
-#else
- RuntimeGogoBytes = 64,
-#endif // Plan 9
-#endif // Windows
-#endif // Solaris
- PhysPageSize = 4096,
- PCQuantum = 1,
- Int64Align = 8
-};
diff --git a/src/runtime/arch_arm.h b/src/runtime/arch_arm.h
deleted file mode 100644
index 637a334a0..000000000
--- a/src/runtime/arch_arm.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-enum {
- thechar = '5',
- BigEndian = 0,
- CacheLineSize = 32,
- RuntimeGogoBytes = 60,
-#ifdef GOOS_nacl
- PhysPageSize = 65536,
-#else
- PhysPageSize = 4096,
-#endif
- PCQuantum = 4,
- Int64Align = 4
-};
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index e6d782f37..f1c812b90 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -12,3 +12,8 @@ DATA runtime·no_pointers_stackmap+0x00(SB)/4, $2
DATA runtime·no_pointers_stackmap+0x04(SB)/4, $0
GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
+TEXT runtime·nop(SB),NOSPLIT,$0-0
+ RET
+
+GLOBL runtime·mheap_(SB), NOPTR, $0
+GLOBL runtime·memstats(SB), NOPTR, $0
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 501e64b09..a02bb5556 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
@@ -49,7 +50,7 @@ nocpuinfo:
// update stackguard after _cgo_init
MOVL $runtime·g0(SB), CX
MOVL (g_stack+stack_lo)(CX), AX
- ADDL $const_StackGuard, AX
+ ADDL $const__StackGuard, AX
MOVL AX, g_stackguard0(CX)
MOVL AX, g_stackguard1(CX)
@@ -199,62 +200,49 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
JMP AX
RET
-// switchtoM is a dummy routine that onM leaves at the bottom
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
-// at the top of the M stack because the one at the top of
-// the M stack terminates the stack walk (see topofstack()).
-TEXT runtime·switchtoM(SB), NOSPLIT, $0-0
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET
-// func onM_signalok(fn func())
-TEXT runtime·onM_signalok(SB), NOSPLIT, $0-4
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-4
+ MOVL fn+0(FP), DI // DI = fn
get_tls(CX)
MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m
+
MOVL m_gsignal(BX), DX // DX = gsignal
CMPL AX, DX
- JEQ ongsignal
- JMP runtime·onM(SB)
-
-ongsignal:
- MOVL fn+0(FP), DI // DI = fn
- MOVL DI, DX
- MOVL 0(DI), DI
- CALL DI
- RET
-
-// func onM(fn func())
-TEXT runtime·onM(SB), NOSPLIT, $0-4
- MOVL fn+0(FP), DI // DI = fn
- get_tls(CX)
- MOVL g(CX), AX // AX = g
- MOVL g_m(AX), BX // BX = m
+ JEQ noswitch
MOVL m_g0(BX), DX // DX = g0
CMPL AX, DX
- JEQ onm
+ JEQ noswitch
MOVL m_curg(BX), BP
CMPL AX, BP
- JEQ oncurg
+ JEQ switch
- // Not g0, not curg. Must be gsignal, but that's not allowed.
+ // Bad: g is not gsignal, not g0, not curg. What is it?
// Hide call from linker nosplit analysis.
- MOVL $runtime·badonm(SB), AX
+ MOVL $runtime·badsystemstack(SB), AX
CALL AX
-oncurg:
+switch:
// save our state in g->sched. Pretend to
- // be switchtoM if the G stack is scanned.
- MOVL $runtime·switchtoM(SB), (g_sched+gobuf_pc)(AX)
+ // be systemstack_switch if the G stack is scanned.
+ MOVL $runtime·systemstack_switch(SB), (g_sched+gobuf_pc)(AX)
MOVL SP, (g_sched+gobuf_sp)(AX)
MOVL AX, (g_sched+gobuf_g)(AX)
// switch to g0
MOVL DX, g(CX)
MOVL (g_sched+gobuf_sp)(DX), BX
- // make it look like mstart called onM on g0, to stop traceback
+ // make it look like mstart called systemstack on g0, to stop traceback
SUBL $4, BX
MOVL $runtime·mstart(SB), DX
MOVL DX, 0(BX)
@@ -275,8 +263,8 @@ oncurg:
MOVL $0, (g_sched+gobuf_sp)(AX)
RET
-onm:
- // already on m stack, just call directly
+noswitch:
+ // already on system stack, just call directly
MOVL DI, DX
MOVL 0(DI), DI
CALL DI
@@ -740,7 +728,7 @@ needm:
// the same SP back to m->sched.sp. That seems redundant,
// but if an unrecovered panic happens, unwindm will
// restore the g->sched.sp from the stack location
- // and then onM will try to use it. If we don't set it here,
+ // and then systemstack will try to use it. If we don't set it here,
// that restored SP will be uninitialized (typically 0) and
// will not be usable.
MOVL m_g0(BP), SI
@@ -2290,3 +2278,10 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$0
TEXT runtime·goexit(SB),NOSPLIT,$0-0
BYTE $0x90 // NOP
CALL runtime·goexit1(SB) // does not return
+
+TEXT runtime·getg(SB),NOSPLIT,$0-4
+ get_tls(CX)
+ MOVL g(CX), AX
+ MOVL AX, ret+0(FP)
+ RET
+
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 1aa2d71a8..6e3f5ff6c 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
@@ -47,7 +48,7 @@ nocpuinfo:
// update stackguard after _cgo_init
MOVQ $runtime·g0(SB), CX
MOVQ (g_stack+stack_lo)(CX), AX
- ADDQ $const_StackGuard, AX
+ ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(CX)
MOVQ AX, g_stackguard1(CX)
@@ -189,55 +190,41 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
JMP AX
RET
-// switchtoM is a dummy routine that onM leaves at the bottom
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
-// at the top of the M stack because the one at the top of
-// the M stack terminates the stack walk (see topofstack()).
-TEXT runtime·switchtoM(SB), NOSPLIT, $0-0
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET
-// func onM_signalok(fn func())
-TEXT runtime·onM_signalok(SB), NOSPLIT, $0-8
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+ MOVQ fn+0(FP), DI // DI = fn
get_tls(CX)
MOVQ g(CX), AX // AX = g
MOVQ g_m(AX), BX // BX = m
+
MOVQ m_gsignal(BX), DX // DX = gsignal
CMPQ AX, DX
- JEQ ongsignal
- JMP runtime·onM(SB)
-
-ongsignal:
- MOVQ fn+0(FP), DI // DI = fn
- MOVQ DI, DX
- MOVQ 0(DI), DI
- CALL DI
- RET
-
-// func onM(fn func())
-TEXT runtime·onM(SB), NOSPLIT, $0-8
- MOVQ fn+0(FP), DI // DI = fn
- get_tls(CX)
- MOVQ g(CX), AX // AX = g
- MOVQ g_m(AX), BX // BX = m
+ JEQ noswitch
MOVQ m_g0(BX), DX // DX = g0
CMPQ AX, DX
- JEQ onm
+ JEQ noswitch
MOVQ m_curg(BX), BP
CMPQ AX, BP
- JEQ oncurg
+ JEQ switch
- // Not g0, not curg. Must be gsignal, but that's not allowed.
- // Hide call from linker nosplit analysis.
- MOVQ $runtime·badonm(SB), AX
+ // Bad: g is not gsignal, not g0, not curg. What is it?
+ MOVQ $runtime·badsystemstack(SB), AX
CALL AX
-oncurg:
+switch:
// save our state in g->sched. Pretend to
- // be switchtoM if the G stack is scanned.
- MOVQ $runtime·switchtoM(SB), BP
+ // be systemstack_switch if the G stack is scanned.
+ MOVQ $runtime·systemstack_switch(SB), BP
MOVQ BP, (g_sched+gobuf_pc)(AX)
MOVQ SP, (g_sched+gobuf_sp)(AX)
MOVQ AX, (g_sched+gobuf_g)(AX)
@@ -245,7 +232,7 @@ oncurg:
// switch to g0
MOVQ DX, g(CX)
MOVQ (g_sched+gobuf_sp)(DX), BX
- // make it look like mstart called onM on g0, to stop traceback
+ // make it look like mstart called systemstack on g0, to stop traceback
SUBQ $8, BX
MOVQ $runtime·mstart(SB), DX
MOVQ DX, 0(BX)
@@ -266,7 +253,7 @@ oncurg:
MOVQ $0, (g_sched+gobuf_sp)(AX)
RET
-onm:
+noswitch:
// already on m stack, just call directly
MOVQ DI, DX
MOVQ 0(DI), DI
@@ -726,7 +713,7 @@ needm:
// the same SP back to m->sched.sp. That seems redundant,
// but if an unrecovered panic happens, unwindm will
// restore the g->sched.sp from the stack location
- // and then onM will try to use it. If we don't set it here,
+ // and then systemstack will try to use it. If we don't set it here,
// that restored SP will be uninitialized (typically 0) and
// will not be usable.
MOVQ m_g0(BP), SI
@@ -2235,3 +2222,9 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$0
TEXT runtime·goexit(SB),NOSPLIT,$0-0
BYTE $0x90 // NOP
CALL runtime·goexit1(SB) // does not return
+
+TEXT runtime·getg(SB),NOSPLIT,$0-8
+ get_tls(CX)
+ MOVQ g(CX), AX
+ MOVQ AX, ret+0(FP)
+ RET
diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s
index 153564b14..cead3cd07 100644
--- a/src/runtime/asm_amd64p32.s
+++ b/src/runtime/asm_amd64p32.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
@@ -164,55 +165,42 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
JMP AX
RET
-// switchtoM is a dummy routine that onM leaves at the bottom
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
-// at the top of the M stack because the one at the top of
+// at the top of the system stack because the one at the top of
// the M stack terminates the stack walk (see topofstack()).
-TEXT runtime·switchtoM(SB), NOSPLIT, $0-0
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
RET
-// func onM_signalok(fn func())
-TEXT runtime·onM_signalok(SB), NOSPLIT, $0-4
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-4
+ MOVL fn+0(FP), DI // DI = fn
get_tls(CX)
MOVL g(CX), AX // AX = g
MOVL g_m(AX), BX // BX = m
+
MOVL m_gsignal(BX), DX // DX = gsignal
CMPL AX, DX
- JEQ ongsignal
- JMP runtime·onM(SB)
-
-ongsignal:
- MOVL fn+0(FP), DI // DI = fn
- MOVL DI, DX
- MOVL 0(DI), DI
- CALL DI
- RET
-
-// func onM(fn func())
-TEXT runtime·onM(SB), NOSPLIT, $0-4
- MOVL fn+0(FP), DI // DI = fn
- get_tls(CX)
- MOVL g(CX), AX // AX = g
- MOVL g_m(AX), BX // BX = m
+ JEQ noswitch
MOVL m_g0(BX), DX // DX = g0
CMPL AX, DX
- JEQ onm
+ JEQ noswitch
MOVL m_curg(BX), R8
CMPL AX, R8
- JEQ oncurg
+ JEQ switch
// Not g0, not curg. Must be gsignal, but that's not allowed.
// Hide call from linker nosplit analysis.
- MOVL $runtime·badonm(SB), AX
+ MOVL $runtime·badsystemstack(SB), AX
CALL AX
-oncurg:
+switch:
// save our state in g->sched. Pretend to
- // be switchtoM if the G stack is scanned.
- MOVL $runtime·switchtoM(SB), SI
+ // be systemstack_switch if the G stack is scanned.
+ MOVL $runtime·systemstack_switch(SB), SI
MOVL SI, (g_sched+gobuf_pc)(AX)
MOVL SP, (g_sched+gobuf_sp)(AX)
MOVL AX, (g_sched+gobuf_g)(AX)
@@ -236,7 +224,7 @@ oncurg:
MOVL $0, (g_sched+gobuf_sp)(AX)
RET
-onm:
+noswitch:
// already on m stack, just call directly
MOVL DI, DX
MOVL 0(DI), DI
@@ -1085,3 +1073,9 @@ TEXT runtime·return0(SB), NOSPLIT, $0
TEXT runtime·goexit(SB),NOSPLIT,$0-0
BYTE $0x90 // NOP
CALL runtime·goexit1(SB) // does not return
+
+TEXT runtime·getg(SB),NOSPLIT,$0-4
+ get_tls(CX)
+ MOVL g(CX), AX
+ MOVL AX, ret+0(FP)
+ RET
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 58aebf388..583c7ba50 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
@@ -54,7 +55,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$-4
nocgo:
// update stackguard after _cgo_init
MOVW (g_stack+stack_lo)(g), R0
- ADD $const_StackGuard, R0
+ ADD $const__StackGuard, R0
MOVW R0, g_stackguard0(g)
MOVW R0, g_stackguard1(g)
@@ -190,53 +191,42 @@ TEXT runtime·mcall(SB),NOSPLIT,$-4-4
B runtime·badmcall2(SB)
RET
-// switchtoM is a dummy routine that onM leaves at the bottom
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
-// at the top of the M stack because the one at the top of
-// the M stack terminates the stack walk (see topofstack()).
-TEXT runtime·switchtoM(SB),NOSPLIT,$0-0
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB),NOSPLIT,$0-0
MOVW $0, R0
BL (R0) // clobber lr to ensure push {lr} is kept
RET
-// func onM_signalok(fn func())
-TEXT runtime·onM_signalok(SB), NOSPLIT, $-4-4
- MOVW g_m(g), R1
- MOVW m_gsignal(R1), R2
- CMP g, R2
- B.EQ ongsignal
- B runtime·onM(SB)
-
-ongsignal:
- MOVW fn+0(FP), R0
- MOVW R0, R7
- MOVW 0(R0), R0
- BL (R0)
- RET
-
-// func onM(fn func())
-TEXT runtime·onM(SB),NOSPLIT,$0-4
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB),NOSPLIT,$0-4
MOVW fn+0(FP), R0 // R0 = fn
MOVW g_m(g), R1 // R1 = m
+ MOVW m_gsignal(R1), R2 // R2 = gsignal
+ CMP g, R2
+ B.EQ noswitch
+
MOVW m_g0(R1), R2 // R2 = g0
CMP g, R2
- B.EQ onm
+ B.EQ noswitch
MOVW m_curg(R1), R3
CMP g, R3
- B.EQ oncurg
+ B.EQ switch
- // Not g0, not curg. Must be gsignal, but that's not allowed.
+ // Bad: g is not gsignal, not g0, not curg. What is it?
// Hide call from linker nosplit analysis.
- MOVW $runtime·badonm(SB), R0
+ MOVW $runtime·badsystemstack(SB), R0
BL (R0)
-oncurg:
+switch:
// save our state in g->sched. Pretend to
- // be switchtoM if the G stack is scanned.
- MOVW $runtime·switchtoM(SB), R3
+ // be systemstack_switch if the G stack is scanned.
+ MOVW $runtime·systemstack_switch(SB), R3
ADD $4, R3, R3 // get past push {lr}
MOVW R3, (g_sched+gobuf_pc)(g)
MOVW SP, (g_sched+gobuf_sp)(g)
@@ -249,7 +239,7 @@ oncurg:
BL setg<>(SB)
MOVW R5, R0
MOVW (g_sched+gobuf_sp)(R2), R3
- // make it look like mstart called onM on g0, to stop traceback
+ // make it look like mstart called systemstack on g0, to stop traceback
SUB $4, R3, R3
MOVW $runtime·mstart(SB), R4
MOVW R4, 0(R3)
@@ -269,7 +259,7 @@ oncurg:
MOVW R3, (g_sched+gobuf_sp)(g)
RET
-onm:
+noswitch:
MOVW R0, R7
MOVW 0(R0), R0
BL (R0)
@@ -564,7 +554,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-12
// the same SP back to m->sched.sp. That seems redundant,
// but if an unrecovered panic happens, unwindm will
// restore the g->sched.sp from the stack location
- // and then onM will try to use it. If we don't set it here,
+ // and then systemstack will try to use it. If we don't set it here,
// that restored SP will be uninitialized (typically 0) and
// will not be usable.
MOVW g_m(g), R8
@@ -1326,3 +1316,7 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$8
TEXT runtime·goexit(SB),NOSPLIT,$-4-0
MOVW R0, R0 // NOP
BL runtime·goexit1(SB) // does not return
+
+TEXT runtime·getg(SB),NOSPLIT,$-4-4
+ MOVW g, ret+0(FP)
+ RET
diff --git a/src/runtime/atomic.go b/src/runtime/atomic.go
deleted file mode 100644
index a0e4d84e9..000000000
--- a/src/runtime/atomic.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !arm
-
-package runtime
-
-import "unsafe"
-
-//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
-
-//go:noescape
-func xadd64(ptr *uint64, delta int64) uint64
-
-//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
-
-//go:noescape
-func xchg64(ptr *uint64, new uint64) uint64
-
-// Cannot use noescape here: ptr does not but new does escape.
-// Instead use noescape(ptr) in wrapper below.
-func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
-
-//go:nosplit
-func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
- old := xchgp1(noescape(ptr), new)
- writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
- return old
-}
-
-//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
-
-//go:noescape
-func atomicload(ptr *uint32) uint32
-
-//go:noescape
-func atomicload64(ptr *uint64) uint64
-
-//go:noescape
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func atomicor8(ptr *uint8, val uint8)
-
-//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
-
-//go:noescape
-func atomicstore(ptr *uint32, val uint32)
-
-//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
-
-// Cannot use noescape here: ptr does not but val does escape.
-// Instead use noescape(ptr) in wrapper below.
-func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
-
-//go:nosplit
-func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer) {
- atomicstorep1(noescape(ptr), val)
- // TODO(rsc): Why does the compiler think writebarrierptr_nostore's dst argument escapes?
- writebarrierptr_nostore((*uintptr)(noescape(ptr)), uintptr(val))
-}
-
-// Cannot use noescape here: ptr does not but new does escape.
-// Instead use noescape(ptr) in wrapper below.
-func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
-
-//go:nosplit
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
- ok := casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), old, new)
- if !ok {
- return false
- }
- writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
- return true
-}
diff --git a/src/runtime/atomic_386.c b/src/runtime/atomic_386.c
deleted file mode 100644
index 82d36f2d9..000000000
--- a/src/runtime/atomic_386.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "textflag.h"
-
-#pragma textflag NOSPLIT
-uint32
-runtime·atomicload(uint32 volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-void*
-runtime·atomicloadp(void* volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-uint64
-runtime·xadd64(uint64 volatile* addr, int64 v)
-{
- uint64 old;
-
- do
- old = *addr;
- while(!runtime·cas64(addr, old, old+v));
-
- return old+v;
-}
-
-#pragma textflag NOSPLIT
-uint64
-runtime·xchg64(uint64 volatile* addr, uint64 v)
-{
- uint64 old;
-
- do
- old = *addr;
- while(!runtime·cas64(addr, old, v));
-
- return old;
-}
diff --git a/src/runtime/atomic_386.go b/src/runtime/atomic_386.go
new file mode 100644
index 000000000..5563432ef
--- /dev/null
+++ b/src/runtime/atomic_386.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// The calls to nop are to keep these functions from being inlined.
+// If they are inlined we have no guarantee that later rewrites of the
+// code by optimizers will preserve the relative order of memory accesses.
+
+//go:nosplit
+func atomicload(ptr *uint32) uint32 {
+ nop()
+ return *ptr
+}
+
+//go:nosplit
+func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+ nop()
+ return *(*unsafe.Pointer)(ptr)
+}
+
+//go:nosplit
+func xadd64(ptr *uint64, delta int64) uint64 {
+ for {
+ old := *ptr
+ if cas64(ptr, old, old+uint64(delta)) {
+ return old + uint64(delta)
+ }
+ }
+}
+
+//go:nosplit
+func xchg64(ptr *uint64, new uint64) uint64 {
+ for {
+ old := *ptr
+ if cas64(ptr, old, new) {
+ return old
+ }
+ }
+}
+
+//go:noescape
+func xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func xchg(ptr *uint32, new uint32) uint32
+
+// xchgp cannot have a go:noescape annotation, because
+// while ptr does not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make xchgp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ return xchgp1(noescape(ptr), new)
+}
+
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func atomicload64(ptr *uint64) uint64
+
+//go:noescape
+func atomicor8(ptr *uint8, val uint8)
+
+//go:noescape
+func cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func atomicstore(ptr *uint32, val uint32)
+
+//go:noescape
+func atomicstore64(ptr *uint64, val uint64)
+
+// atomicstorep cannot have a go:noescape annotation.
+// See comment above for xchgp.
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), new)
+}
+
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
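
The xadd64 and xchg64 bodies above are the standard compare-and-swap retry loop built on the runtime's cas64. A user-level analogue with sync/atomic (not the runtime primitives) looks like this:

package example

import "sync/atomic"

// addUint64 retries a compare-and-swap until no other writer intervenes,
// then returns the new value, the same shape as the xadd64 loop above.
func addUint64(p *uint64, delta uint64) uint64 {
	for {
		old := atomic.LoadUint64(p)
		if atomic.CompareAndSwapUint64(p, old, old+delta) {
			return old + delta
		}
	}
}
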
diff --git a/src/runtime/atomic_amd64x.c b/src/runtime/atomic_amd64x.c
deleted file mode 100644
index 7be57ac95..000000000
--- a/src/runtime/atomic_amd64x.c
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64 amd64p32
-
-#include "runtime.h"
-#include "textflag.h"
-
-#pragma textflag NOSPLIT
-uint32
-runtime·atomicload(uint32 volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-uint64
-runtime·atomicload64(uint64 volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-void*
-runtime·atomicloadp(void* volatile* addr)
-{
- return *addr;
-}
diff --git a/src/runtime/atomic_amd64x.go b/src/runtime/atomic_amd64x.go
new file mode 100644
index 000000000..f2dd58411
--- /dev/null
+++ b/src/runtime/atomic_amd64x.go
@@ -0,0 +1,82 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 amd64p32
+
+package runtime
+
+import "unsafe"
+
+// The calls to nop are to keep these functions from being inlined.
+// If they are inlined we have no guarantee that later rewrites of the
+// code by optimizers will preserve the relative order of memory accesses.
+
+//go:nosplit
+func atomicload(ptr *uint32) uint32 {
+ nop()
+ return *ptr
+}
+
+//go:nosplit
+func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+ nop()
+ return *(*unsafe.Pointer)(ptr)
+}
+
+//go:nosplit
+func atomicload64(ptr *uint64) uint64 {
+ nop()
+ return *ptr
+}
+
+//go:noescape
+func xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func xchg64(ptr *uint64, new uint64) uint64
+
+// xchgp cannot have a go:noescape annotation, because
+// while ptr does not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make xchgp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ return xchgp1(noescape(ptr), new)
+}
+
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func atomicor8(ptr *uint8, val uint8)
+
+//go:noescape
+func cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func atomicstore(ptr *uint32, val uint32)
+
+//go:noescape
+func atomicstore64(ptr *uint64, val uint64)
+
+// atomicstorep cannot have a go:noescape annotation.
+// See comment above for xchgp.
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), new)
+}
+
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/runtime/atomic_arm.go b/src/runtime/atomic_arm.go
index b1632cdd1..fd55a0aca 100644
--- a/src/runtime/atomic_arm.go
+++ b/src/runtime/atomic_arm.go
@@ -85,7 +85,7 @@ func atomicstore(addr *uint32, v uint32) {
//go:nosplit
func cas64(addr *uint64, old, new uint64) bool {
var ok bool
- onM(func() {
+ systemstack(func() {
lock(addrLock(addr))
if *addr == old {
*addr = new
@@ -99,7 +99,7 @@ func cas64(addr *uint64, old, new uint64) bool {
//go:nosplit
func xadd64(addr *uint64, delta int64) uint64 {
var r uint64
- onM(func() {
+ systemstack(func() {
lock(addrLock(addr))
r = *addr + uint64(delta)
*addr = r
@@ -111,7 +111,7 @@ func xadd64(addr *uint64, delta int64) uint64 {
//go:nosplit
func xchg64(addr *uint64, v uint64) uint64 {
var r uint64
- onM(func() {
+ systemstack(func() {
lock(addrLock(addr))
r = *addr
*addr = v
@@ -123,7 +123,7 @@ func xchg64(addr *uint64, v uint64) uint64 {
//go:nosplit
func atomicload64(addr *uint64) uint64 {
var r uint64
- onM(func() {
+ systemstack(func() {
lock(addrLock(addr))
r = *addr
unlock(addrLock(addr))
@@ -133,7 +133,7 @@ func atomicload64(addr *uint64) uint64 {
//go:nosplit
func atomicstore64(addr *uint64, v uint64) {
- onM(func() {
+ systemstack(func() {
lock(addrLock(addr))
*addr = v
unlock(addrLock(addr))
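
On ARM the runtime keeps emulating 64-bit atomics with a lock; the only change above is that the locked section now runs via systemstack instead of onM. A portable sketch of the same serialize-with-a-lock idea, using a plain mutex in place of the runtime's addrLock/systemstack machinery:

package example

import "sync"

var mu sync.Mutex // stand-in for addrLock(addr)

// cas64Locked mirrors the locked compare-and-swap above, minus the
// runtime-internal stack switch.
func cas64Locked(addr *uint64, old, new uint64) bool {
	mu.Lock()
	defer mu.Unlock()
	if *addr == old {
		*addr = new
		return true
	}
	return false
}
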
diff --git a/src/runtime/cgo.go b/src/runtime/cgo.go
new file mode 100644
index 000000000..7e6b253af
--- /dev/null
+++ b/src/runtime/cgo.go
@@ -0,0 +1,23 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//go:cgo_export_static main
+
+// Filled in by runtime/cgo when linked into the binary.
+
+//go:linkname _cgo_init _cgo_init
+//go:linkname _cgo_malloc _cgo_malloc
+//go:linkname _cgo_free _cgo_free
+//go:linkname _cgo_thread_start _cgo_thread_start
+
+var (
+ _cgo_init unsafe.Pointer
+ _cgo_malloc unsafe.Pointer
+ _cgo_free unsafe.Pointer
+ _cgo_thread_start unsafe.Pointer
+)
diff --git a/src/runtime/cgo/callbacks.c b/src/runtime/cgo/callbacks.c
deleted file mode 100644
index 282beeea8..000000000
--- a/src/runtime/cgo/callbacks.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "../runtime.h"
-#include "../cgocall.h"
-#include "textflag.h"
-
-// These utility functions are available to be called from code
-// compiled with gcc via crosscall2.
-
-// The declaration of crosscall2 is:
-// void crosscall2(void (*fn)(void *, int), void *, int);
-//
-// We need to export the symbol crosscall2 in order to support
-// callbacks from shared libraries. This applies regardless of
-// linking mode.
-#pragma cgo_export_static crosscall2
-#pragma cgo_export_dynamic crosscall2
-
-// Allocate memory. This allocates the requested number of bytes in
-// memory controlled by the Go runtime. The allocated memory will be
-// zeroed. You are responsible for ensuring that the Go garbage
-// collector can see a pointer to the allocated memory for as long as
-// it is valid, e.g., by storing a pointer in a local variable in your
-// C function, or in memory allocated by the Go runtime. If the only
-// pointers are in a C global variable or in memory allocated via
-// malloc, then the Go garbage collector may collect the memory.
-
-// Call like this in code compiled with gcc:
-// struct { size_t len; void *ret; } a;
-// a.len = /* number of bytes to allocate */;
-// crosscall2(_cgo_allocate, &a, sizeof a);
-// /* Here a.ret is a pointer to the allocated memory. */
-
-void runtime·_cgo_allocate_internal(void);
-
-#pragma cgo_export_static _cgo_allocate
-#pragma cgo_export_dynamic _cgo_allocate
-#pragma textflag NOSPLIT
-void
-_cgo_allocate(void *a, int32 n)
-{
- runtime·cgocallback((void(*)(void))runtime·_cgo_allocate_internal, a, n);
-}
-
-// Panic. The argument is converted into a Go string.
-
-// Call like this in code compiled with gcc:
-// struct { const char *p; } a;
-// a.p = /* string to pass to panic */;
-// crosscall2(_cgo_panic, &a, sizeof a);
-// /* The function call will not return. */
-
-void runtime·_cgo_panic_internal(void);
-
-#pragma cgo_export_static _cgo_panic
-#pragma cgo_export_dynamic _cgo_panic
-#pragma textflag NOSPLIT
-void
-_cgo_panic(void *a, int32 n)
-{
- runtime·cgocallback((void(*)(void))runtime·_cgo_panic_internal, a, n);
-}
-
-#pragma cgo_import_static x_cgo_init
-extern void x_cgo_init(G*);
-void (*_cgo_init)(G*) = x_cgo_init;
-
-#pragma cgo_import_static x_cgo_malloc
-extern void x_cgo_malloc(void*);
-void (*_cgo_malloc)(void*) = x_cgo_malloc;
-
-#pragma cgo_import_static x_cgo_free
-extern void x_cgo_free(void*);
-void (*_cgo_free)(void*) = x_cgo_free;
-
-#pragma cgo_import_static x_cgo_thread_start
-extern void x_cgo_thread_start(void*);
-void (*_cgo_thread_start)(void*) = x_cgo_thread_start;
-
-#pragma cgo_export_static _cgo_topofstack
-#pragma cgo_export_dynamic _cgo_topofstack
diff --git a/src/runtime/cgo/callbacks.go b/src/runtime/cgo/callbacks.go
new file mode 100644
index 000000000..1e8b59054
--- /dev/null
+++ b/src/runtime/cgo/callbacks.go
@@ -0,0 +1,95 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgo
+
+import "unsafe"
+
+// These utility functions are available to be called from code
+// compiled with gcc via crosscall2.
+
+// cgocallback is defined in runtime
+//go:linkname _runtime_cgocallback runtime.cgocallback
+func _runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr)
+
+// The declaration of crosscall2 is:
+// void crosscall2(void (*fn)(void *, int), void *, int);
+//
+// We need to export the symbol crosscall2 in order to support
+// callbacks from shared libraries. This applies regardless of
+// linking mode.
+//go:cgo_export_static crosscall2
+//go:cgo_export_dynamic crosscall2
+
+// Allocate memory. This allocates the requested number of bytes in
+// memory controlled by the Go runtime. The allocated memory will be
+// zeroed. You are responsible for ensuring that the Go garbage
+// collector can see a pointer to the allocated memory for as long as
+// it is valid, e.g., by storing a pointer in a local variable in your
+// C function, or in memory allocated by the Go runtime. If the only
+// pointers are in a C global variable or in memory allocated via
+// malloc, then the Go garbage collector may collect the memory.
+
+// Call like this in code compiled with gcc:
+// struct { size_t len; void *ret; } a;
+// a.len = /* number of bytes to allocate */;
+// crosscall2(_cgo_allocate, &a, sizeof a);
+// /* Here a.ret is a pointer to the allocated memory. */
+
+//go:linkname _runtime_cgo_allocate_internal runtime._cgo_allocate_internal
+var _runtime_cgo_allocate_internal byte
+
+//go:linkname _cgo_allocate _cgo_allocate
+//go:cgo_export_static _cgo_allocate
+//go:cgo_export_dynamic _cgo_allocate
+//go:nosplit
+func _cgo_allocate(a unsafe.Pointer, n int32) {
+ _runtime_cgocallback(unsafe.Pointer(&_runtime_cgo_allocate_internal), a, uintptr(n))
+}
+
+// Panic. The argument is converted into a Go string.
+
+// Call like this in code compiled with gcc:
+// struct { const char *p; } a;
+// a.p = /* string to pass to panic */;
+// crosscall2(_cgo_panic, &a, sizeof a);
+// /* The function call will not return. */
+
+//go:linkname _runtime_cgo_panic_internal runtime._cgo_panic_internal
+var _runtime_cgo_panic_internal byte
+
+//go:linkname _cgo_panic _cgo_panic
+//go:cgo_export_static _cgo_panic
+//go:cgo_export_dynamic _cgo_panic
+//go:nosplit
+func _cgo_panic(a unsafe.Pointer, n int32) {
+ _runtime_cgocallback(unsafe.Pointer(&_runtime_cgo_panic_internal), a, uintptr(n))
+}
+
+//go:cgo_import_static x_cgo_init
+//go:linkname x_cgo_init x_cgo_init
+//go:linkname _cgo_init _cgo_init
+var x_cgo_init byte
+var _cgo_init = &x_cgo_init
+
+//go:cgo_import_static x_cgo_malloc
+//go:linkname x_cgo_malloc x_cgo_malloc
+//go:linkname _cgo_malloc _cgo_malloc
+var x_cgo_malloc byte
+var _cgo_malloc = &x_cgo_malloc
+
+//go:cgo_import_static x_cgo_free
+//go:linkname x_cgo_free x_cgo_free
+//go:linkname _cgo_free _cgo_free
+var x_cgo_free byte
+var _cgo_free = &x_cgo_free
+
+//go:cgo_import_static x_cgo_thread_start
+//go:linkname x_cgo_thread_start x_cgo_thread_start
+//go:linkname _cgo_thread_start _cgo_thread_start
+var x_cgo_thread_start byte
+var _cgo_thread_start = &x_cgo_thread_start
+
+//go:cgo_export_static _cgo_topofstack
+//go:cgo_export_dynamic _cgo_topofstack
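
The x_cgo_init block above is the idiom this patch uses throughout runtime/cgo for taking the address of a C symbol from Go: import the symbol statically, name it as a Go variable of type byte, and publish its address through a pointer. A sketch of the same idiom with hypothetical names (x_example, _example); the toolchain restricts the //go:cgo_* directives to runtime packages, so this is illustrative rather than a drop-in:

	package cgo

	import _ "unsafe" // for go:linkname

	//go:cgo_import_static x_example
	//go:linkname x_example x_example
	var x_example byte        // stands for the C symbol; only its address is used
	var _example = &x_example // pointer that Go code can test for nil and call through
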
diff --git a/src/runtime/cgo/dragonfly.c b/src/runtime/cgo/dragonfly.go
index c233c8ba9..69d52b5b2 100644
--- a/src/runtime/cgo/dragonfly.c
+++ b/src/runtime/cgo/dragonfly.go
@@ -4,16 +4,16 @@
// +build dragonfly
-#include "textflag.h"
+package cgo
+
+import _ "unsafe" // for go:linkname
// Supply environ and __progname, because we don't
// link against the standard DragonFly crt0.o and the
// libc dynamic library needs them.
-#pragma dataflag NOPTR
-char *environ[1];
-#pragma dataflag NOPTR
-char *__progname;
+//go:linkname _environ environ
+//go:linkname _progname __progname
-#pragma dynexport environ environ
-#pragma dynexport __progname __progname
+var _environ uintptr
+var _progname uintptr
diff --git a/src/runtime/cgo/freebsd.c b/src/runtime/cgo/freebsd.go
index 4876b2abe..99cf3fbca 100644
--- a/src/runtime/cgo/freebsd.c
+++ b/src/runtime/cgo/freebsd.go
@@ -4,16 +4,19 @@
// +build freebsd
-#include "textflag.h"
+package cgo
+
+import _ "unsafe" // for go:linkname
// Supply environ and __progname, because we don't
// link against the standard FreeBSD crt0.o and the
// libc dynamic library needs them.
-#pragma dataflag NOPTR
-char *environ[1];
-#pragma dataflag NOPTR
-char *__progname;
+//go:linkname _environ environ
+//go:linkname _progname __progname
+
+//go:cgo_export_dynamic environ
+//go:cgo_export_dynamic __progname
-#pragma dynexport environ environ
-#pragma dynexport __progname __progname
+var _environ uintptr
+var _progname uintptr
diff --git a/src/runtime/cgo/iscgo.c b/src/runtime/cgo/iscgo.go
index 0907a1958..61cba73d2 100644
--- a/src/runtime/cgo/iscgo.c
+++ b/src/runtime/cgo/iscgo.go
@@ -9,7 +9,12 @@
// correctly, and sometimes they break. This variable is a
// backup: it depends only on old C style static linking rules.
-#include "../runtime.h"
+package cgo
-bool runtime·iscgo = 1;
-uint32 runtime·needextram = 1; // create an extra M on first cgo call
+import _ "unsafe" // for go:linkname
+
+//go:linkname _iscgo runtime.iscgo
+var _iscgo bool = true
+
+//go:linkname _needextram runtime.needextram
+var _needextram uint32 = 1 // create an extra M on first cgo call
diff --git a/src/runtime/cgo/netbsd.c b/src/runtime/cgo/netbsd.go
index 076cc87f1..ac6b18a93 100644
--- a/src/runtime/cgo/netbsd.c
+++ b/src/runtime/cgo/netbsd.go
@@ -4,16 +4,16 @@
// +build netbsd
-#include "textflag.h"
+package cgo
+
+import _ "unsafe" // for go:linkname
// Supply environ and __progname, because we don't
// link against the standard NetBSD crt0.o and the
// libc dynamic library needs them.
-#pragma dataflag NOPTR
-char *environ[1];
-#pragma dataflag NOPTR
-char *__progname;
+//go:linkname _environ environ
+//go:linkname _progname __progname
-#pragma dynexport environ environ
-#pragma dynexport __progname __progname
+var _environ uintptr
+var _progname uintptr
diff --git a/src/runtime/cgo/openbsd.c b/src/runtime/cgo/openbsd.go
index 476649544..61af3a8e7 100644
--- a/src/runtime/cgo/openbsd.c
+++ b/src/runtime/cgo/openbsd.go
@@ -4,24 +4,28 @@
// +build openbsd
-#include "textflag.h"
+package cgo
+
+import _ "unsafe" // for go:linkname
// Supply environ, __progname and __guard_local, because
// we don't link against the standard OpenBSD crt0.o and
// the libc dynamic library needs them.
-#pragma dataflag NOPTR
-char *environ[1];
-#pragma dataflag NOPTR
-char *__progname;
-long __guard_local;
+//go:linkname _environ environ
+//go:linkname _progname __progname
+//go:linkname _guard_local __guard_local
+
+var _environ uintptr
+var _progname uintptr
+var _guard_local uintptr
-#pragma dynexport environ environ
-#pragma dynexport __progname __progname
+//go:cgo_export_dynamic environ environ
+//go:cgo_export_dynamic __progname __progname
// This is normally marked as hidden and placed in the
// .openbsd.randomdata section.
-#pragma dynexport __guard_local __guard_local
+//go:cgo_export_dynamic __guard_local __guard_local
// We override pthread_create to support PT_TLS.
-#pragma dynexport pthread_create pthread_create
+//go:cgo_export_dynamic pthread_create pthread_create
diff --git a/src/runtime/cgo/setenv.c b/src/runtime/cgo/setenv.c
deleted file mode 100644
index 76d88cbf1..000000000
--- a/src/runtime/cgo/setenv.c
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd
-
-#pragma cgo_import_static x_cgo_setenv
-#pragma cgo_import_static x_cgo_unsetenv
-
-void x_cgo_setenv(char**);
-void (*runtime·_cgo_setenv)(char**) = x_cgo_setenv;
-void x_cgo_unsetenv(char**);
-void (*runtime·_cgo_unsetenv)(char**) = x_cgo_unsetenv;
diff --git a/src/runtime/cgo/setenv.go b/src/runtime/cgo/setenv.go
new file mode 100644
index 000000000..97c8c6ac9
--- /dev/null
+++ b/src/runtime/cgo/setenv.go
@@ -0,0 +1,21 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package cgo
+
+import _ "unsafe" // for go:linkname
+
+//go:cgo_import_static x_cgo_setenv
+//go:linkname x_cgo_setenv x_cgo_setenv
+//go:linkname _cgo_setenv runtime._cgo_setenv
+var x_cgo_setenv byte
+var _cgo_setenv = &x_cgo_setenv
+
+//go:cgo_import_static x_cgo_unsetenv
+//go:linkname x_cgo_unsetenv x_cgo_unsetenv
+//go:linkname _cgo_unsetenv runtime._cgo_unsetenv
+var x_cgo_unsetenv byte
+var _cgo_unsetenv = &x_cgo_unsetenv
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 7fd91469e..258cabfc8 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -103,7 +103,7 @@ func cgocall_errno(fn, arg unsafe.Pointer) int32 {
// Create an extra M for callbacks on threads not created by Go on first cgo call.
if needextram == 1 && cas(&needextram, 1, 0) {
- onM(newextram)
+ systemstack(newextram)
}
/*
@@ -127,9 +127,9 @@ func cgocall_errno(fn, arg unsafe.Pointer) int32 {
* so it is safe to call while "in a system call", outside
* the $GOMAXPROCS accounting.
*/
- entersyscall()
+ entersyscall(0)
errno := asmcgocall_errno(fn, arg)
- exitsyscall()
+ exitsyscall(0)
return errno
}
@@ -153,17 +153,13 @@ func endcgo(mp *m) {
// Helper functions for cgo code.
-// Filled by schedinit from corresponding C variables,
-// which are in turn filled in by dynamic linker when Cgo is available.
-var cgoMalloc, cgoFree unsafe.Pointer
-
func cmalloc(n uintptr) unsafe.Pointer {
var args struct {
n uint64
ret unsafe.Pointer
}
args.n = uint64(n)
- cgocall(cgoMalloc, unsafe.Pointer(&args))
+ cgocall(_cgo_malloc, unsafe.Pointer(&args))
if args.ret == nil {
gothrow("C malloc failed")
}
@@ -171,7 +167,7 @@ func cmalloc(n uintptr) unsafe.Pointer {
}
func cfree(p unsafe.Pointer) {
- cgocall(cgoFree, p)
+ cgocall(_cgo_free, p)
}
// Call from C back to Go.
@@ -189,17 +185,17 @@ func cgocallbackg() {
// save syscall* and let reentersyscall restore them.
savedsp := unsafe.Pointer(gp.syscallsp)
savedpc := gp.syscallpc
- exitsyscall() // coming out of cgo call
+ exitsyscall(0) // coming out of cgo call
cgocallbackg1()
// going back to cgo call
- reentersyscall(savedpc, savedsp)
+ reentersyscall(savedpc, uintptr(savedsp))
}
func cgocallbackg1() {
gp := getg()
if gp.m.needextram {
gp.m.needextram = false
- onM(newextram)
+ systemstack(newextram)
}
// Add entry to defer stack in case of panic.
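
For orientation, the Go-to-C path these hunks adjust is the one cgo calls take: a C function call in user code is dispatched through cgocall, which brackets the call with entersyscall and exitsyscall as shown. A minimal program exercising that path, with a hypothetical add defined in the cgo preamble:

	package main

	// static int add(int a, int b) { return a + b; }
	import "C"

	import "fmt"

	func main() {
		// C.add runs via runtime.cgocall, so this goroutine is treated as
		// being "in a system call" for the duration of the C function.
		fmt.Println(C.add(40, 2)) // 42
	}
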
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 004970182..bb0110f94 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -26,7 +26,7 @@ func makechan(t *chantype, size int64) *hchan {
if hchanSize%maxAlign != 0 || elem.align > maxAlign {
gothrow("makechan: bad alignment")
}
- if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (maxmem-hchanSize)/uintptr(elem.size)) {
+ if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/uintptr(elem.size)) {
panic("makechan: size out of range")
}
diff --git a/src/runtime/chan.h b/src/runtime/chan.h
deleted file mode 100644
index c34ff1533..000000000
--- a/src/runtime/chan.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define MAXALIGN 8
-
-typedef struct WaitQ WaitQ;
-typedef struct Select Select;
-typedef struct Scase Scase;
-
-struct WaitQ
-{
- SudoG* first;
- SudoG* last;
-};
-
-struct Hchan
-{
- uintgo qcount; // total data in the q
- uintgo dataqsiz; // size of the circular q
- byte* buf;
- uint16 elemsize;
- uint32 closed;
- Type* elemtype; // element type
- uintgo sendx; // send index
- uintgo recvx; // receive index
- WaitQ recvq; // list of recv waiters
- WaitQ sendq; // list of send waiters
- Mutex lock;
-};
-
-// Buffer follows Hchan immediately in memory.
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-#define chanbuf(c, i) ((byte*)((c)->buf)+(uintptr)(c)->elemsize*(i))
-
-enum
-{
- debug = 0,
-
- // Scase.kind
- CaseRecv,
- CaseSend,
- CaseDefault,
-};
-
-// Known to compiler.
-// Changes here must also be made in src/cmd/gc/select.c's selecttype.
-struct Scase
-{
- void* elem; // data element
- Hchan* chan; // chan
- uintptr pc; // return pc
- uint16 kind;
- uint16 so; // vararg of selected bool
- bool* receivedp; // pointer to received bool (recv2)
- int64 releasetime;
-};
-
-// Known to compiler.
-// Changes here must also be made in src/cmd/gc/select.c's selecttype.
-struct Select
-{
- uint16 tcase; // total count of scase[]
- uint16 ncase; // currently filled scase[]
- uint16* pollorder; // case poll order
- Hchan** lockorder; // channel lock order
- Scase scase[1]; // one per case (in order of appearance)
-};
diff --git a/src/runtime/chan1.go b/src/runtime/chan1.go
new file mode 100644
index 000000000..000775b1e
--- /dev/null
+++ b/src/runtime/chan1.go
@@ -0,0 +1,61 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//#define MAXALIGN 8
+
+type waitq struct {
+ first *sudog
+ last *sudog
+}
+
+type hchan struct {
+ qcount uint // total data in the q
+ dataqsiz uint // size of the circular q
+ buf *byte
+ elemsize uint16
+ closed uint32
+ elemtype *_type // element type
+ sendx uint // send index
+ recvx uint // receive index
+ recvq waitq // list of recv waiters
+ sendq waitq // list of send waiters
+ lock mutex
+}
+
+// Buffer follows Hchan immediately in memory.
+// chanbuf(c, i) is pointer to the i'th slot in the buffer.
+// #define chanbuf(c, i) ((byte*)((c)->buf)+(uintptr)(c)->elemsize*(i))
+
+const (
+ // scase.kind
+ _CaseRecv = iota
+ _CaseSend
+ _CaseDefault
+)
+
+// Known to compiler.
+// Changes here must also be made in src/cmd/gc/select.c's selecttype.
+type scase struct {
+ elem unsafe.Pointer // data element
+ _chan *hchan // chan
+ pc uintptr // return pc
+ kind uint16
+ so uint16 // vararg of selected bool
+ receivedp *bool // pointer to received bool (recv2)
+ releasetime int64
+}
+
+// Known to compiler.
+// Changes here must also be made in src/cmd/gc/select.c's selecttype.
+type _select struct {
+ tcase uint16 // total count of scase[]
+ ncase uint16 // currently filled scase[]
+ pollorder *uint16 // case poll order
+ lockorder **hchan // channel lock order
+ scase [1]scase // one per case (in order of appearance)
+}
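
The chanbuf macro survives in the new file only as a comment; a direct Go rendering of it, kept as a sketch since the eventual runtime helper may be written differently, would be:

	// chanbuf returns a pointer to the i'th slot of the buffer that
	// follows the hchan header in memory, mirroring the old C macro.
	func chanbuf(c *hchan, i uintptr) unsafe.Pointer {
		return unsafe.Pointer(uintptr(unsafe.Pointer(c.buf)) + uintptr(c.elemsize)*i)
	}
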
diff --git a/src/runtime/complex.go b/src/runtime/complex.go
index ec50f8947..73f1161a5 100644
--- a/src/runtime/complex.go
+++ b/src/runtime/complex.go
@@ -4,28 +4,47 @@
package runtime
+func isposinf(f float64) bool { return f > maxFloat64 }
+func isneginf(f float64) bool { return f < -maxFloat64 }
+func isnan(f float64) bool { return f != f }
+
+func nan() float64 {
+ var f float64 = 0
+ return f / f
+}
+
+func posinf() float64 {
+ var f float64 = maxFloat64
+ return f * f
+}
+
+func neginf() float64 {
+ var f float64 = maxFloat64
+ return -f * f
+}
+
func complex128div(n complex128, d complex128) complex128 {
// Special cases as in C99.
- ninf := real(n) == posinf || real(n) == neginf ||
- imag(n) == posinf || imag(n) == neginf
- dinf := real(d) == posinf || real(d) == neginf ||
- imag(d) == posinf || imag(d) == neginf
+ ninf := isposinf(real(n)) || isneginf(real(n)) ||
+ isposinf(imag(n)) || isneginf(imag(n))
+ dinf := isposinf(real(d)) || isneginf(real(d)) ||
+ isposinf(imag(d)) || isneginf(imag(d))
- nnan := !ninf && (real(n) != real(n) || imag(n) != imag(n))
- dnan := !dinf && (real(d) != real(d) || imag(d) != imag(d))
+ nnan := !ninf && (isnan(real(n)) || isnan(imag(n)))
+ dnan := !dinf && (isnan(real(d)) || isnan(imag(d)))
switch {
case nnan || dnan:
- return complex(nan, nan)
+ return complex(nan(), nan())
case ninf && !dinf:
- return complex(posinf, posinf)
+ return complex(posinf(), posinf())
case !ninf && dinf:
return complex(0, 0)
case real(d) == 0 && imag(d) == 0:
if real(n) == 0 && imag(n) == 0 {
- return complex(nan, nan)
+ return complex(nan(), nan())
} else {
- return complex(posinf, posinf)
+ return complex(posinf(), posinf())
}
default:
// Standard complex arithmetic, factored to avoid unnecessary overflow.
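
A quick user-level check of the special cases above, assuming complex128 division in compiled code is lowered to complex128div as this file suggests: an infinite numerator over a finite, nonzero denominator takes the ninf && !dinf branch.

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		n := complex(math.Inf(1), 0) // real part is +Inf, so ninf is true
		d := complex(3.0, 4.0)       // finite denominator, so dinf is false
		fmt.Println(n / d)           // expected (+Inf+Infi) per the ninf && !dinf case
	}
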
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index 8b1c1c632..d56678e21 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -101,12 +101,10 @@ var (
eod = [3]uintptr{0, 1, 0}
)
-func setcpuprofilerate_m() // proc.c
-
func setcpuprofilerate(hz int32) {
- g := getg()
- g.m.scalararg[0] = uintptr(hz)
- onM(setcpuprofilerate_m)
+ systemstack(func() {
+ setcpuprofilerate_m(hz)
+ })
}
// lostProfileData is a no-op function used in profiles
diff --git a/src/runtime/cputicks.go b/src/runtime/cputicks.go
new file mode 100644
index 000000000..e0593d56e
--- /dev/null
+++ b/src/runtime/cputicks.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !arm
+
+package runtime
+
+// careful: cputicks is not guaranteed to be monotonic! In particular, we have
+// noticed drift between cpus on certain os/arch combinations. See issue 8976.
+func cputicks() int64
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index 4414dd55d..105b79cfe 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -6,18 +6,6 @@ package runtime
import "unsafe"
-// Breakpoint executes a breakpoint trap.
-func Breakpoint()
-
-// LockOSThread wires the calling goroutine to its current operating system thread.
-// Until the calling goroutine exits or calls UnlockOSThread, it will always
-// execute in that thread, and no other goroutine can.
-func LockOSThread()
-
-// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
-// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
-func UnlockOSThread()
-
// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not
// change the current setting.
@@ -37,14 +25,14 @@ func GOMAXPROCS(n int) int {
semacquire(&worldsema, false)
gp := getg()
gp.m.gcing = 1
- onM(stoptheworld)
+ systemstack(stoptheworld)
// newprocs will be processed by starttheworld
newprocs = int32(n)
gp.m.gcing = 0
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
return ret
}
@@ -66,5 +54,3 @@ func NumCgoCall() int64 {
func NumGoroutine() int {
return int(gcount())
}
-
-func gcount() int32
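
The function being edited here is the ordinary exported runtime.GOMAXPROCS, so the stop-the-world it performs via systemstack is visible to any program that changes the setting. Typical usage:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		prev := runtime.GOMAXPROCS(2) // change the setting; stops the world briefly
		fmt.Println("previous:", prev)
		fmt.Println("current:", runtime.GOMAXPROCS(0)) // n < 1 only queries, never changes
		runtime.GOMAXPROCS(prev) // restore
	}
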
diff --git a/src/runtime/defs.c b/src/runtime/defs.c
deleted file mode 100644
index b0a9b20d7..000000000
--- a/src/runtime/defs.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is compiled by cmd/dist to obtain debug information
-// about the given header files.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "type.h"
-#include "race.h"
-#include "chan.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
diff --git a/src/runtime/defs1_netbsd_386.go b/src/runtime/defs1_netbsd_386.go
new file mode 100644
index 000000000..e39fd04c7
--- /dev/null
+++ b/src/runtime/defs1_netbsd_386.go
@@ -0,0 +1,171 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_386.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x6
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = 0x0
+ _EVFILT_WRITE = 0x1
+)
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_size uint32
+ ss_flags int32
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type siginfo struct {
+ _signo int32
+ _code int32
+ _errno int32
+ _reason [20]byte
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_size uint32
+ ss_flags int32
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type mcontextt struct {
+ __gregs [19]int32
+ __fpregs [644]byte
+ _mc_tlsbase int32
+}
+
+type ucontextt struct {
+ uc_flags uint32
+ uc_link *ucontextt
+ uc_sigmask sigset
+ uc_stack stackt
+ uc_mcontext mcontextt
+ __uc_pad [4]int32
+}
+
+type keventt struct {
+ ident uint32
+ filter uint32
+ flags uint32
+ fflags uint32
+ data int64
+ udata *byte
+}
+
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_386.go
+
+const (
+ _REG_GS = 0x0
+ _REG_FS = 0x1
+ _REG_ES = 0x2
+ _REG_DS = 0x3
+ _REG_EDI = 0x4
+ _REG_ESI = 0x5
+ _REG_EBP = 0x6
+ _REG_ESP = 0x7
+ _REG_EBX = 0x8
+ _REG_EDX = 0x9
+ _REG_ECX = 0xa
+ _REG_EAX = 0xb
+ _REG_TRAPNO = 0xc
+ _REG_ERR = 0xd
+ _REG_EIP = 0xe
+ _REG_CS = 0xf
+ _REG_EFL = 0x10
+ _REG_UESP = 0x11
+ _REG_SS = 0x12
+)
diff --git a/src/runtime/defs1_netbsd_amd64.go b/src/runtime/defs1_netbsd_amd64.go
new file mode 100644
index 000000000..cca701e5b
--- /dev/null
+++ b/src/runtime/defs1_netbsd_amd64.go
@@ -0,0 +1,183 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x6
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = 0x0
+ _EVFILT_WRITE = 0x1
+)
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_size uint64
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type siginfo struct {
+ _signo int32
+ _code int32
+ _errno int32
+ _pad int32
+ _reason [24]byte
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_size uint64
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+ pad_cgo_0 [4]byte
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type mcontextt struct {
+ __gregs [26]uint64
+ _mc_tlsbase uint64
+ __fpregs [512]int8
+}
+
+type ucontextt struct {
+ uc_flags uint32
+ pad_cgo_0 [4]byte
+ uc_link *ucontextt
+ uc_sigmask sigset
+ uc_stack stackt
+ uc_mcontext mcontextt
+}
+
+type keventt struct {
+ ident uint64
+ filter uint32
+ flags uint32
+ fflags uint32
+ pad_cgo_0 [4]byte
+ data int64
+ udata *byte
+}
+
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go
+
+const (
+ _REG_RDI = 0x0
+ _REG_RSI = 0x1
+ _REG_RDX = 0x2
+ _REG_RCX = 0x3
+ _REG_R8 = 0x4
+ _REG_R9 = 0x5
+ _REG_R10 = 0x6
+ _REG_R11 = 0x7
+ _REG_R12 = 0x8
+ _REG_R13 = 0x9
+ _REG_R14 = 0xa
+ _REG_R15 = 0xb
+ _REG_RBP = 0xc
+ _REG_RBX = 0xd
+ _REG_RAX = 0xe
+ _REG_GS = 0xf
+ _REG_FS = 0x10
+ _REG_ES = 0x11
+ _REG_DS = 0x12
+ _REG_TRAPNO = 0x13
+ _REG_ERR = 0x14
+ _REG_RIP = 0x15
+ _REG_CS = 0x16
+ _REG_RFLAGS = 0x17
+ _REG_RSP = 0x18
+ _REG_SS = 0x19
+)
diff --git a/src/runtime/defs1_netbsd_arm.go b/src/runtime/defs1_netbsd_arm.go
new file mode 100644
index 000000000..54ddf3882
--- /dev/null
+++ b/src/runtime/defs1_netbsd_arm.go
@@ -0,0 +1,170 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x6
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = 0x0
+ _EVFILT_WRITE = 0x1
+)
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_size uint32
+ ss_flags int32
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type siginfo struct {
+ _signo int32
+ _code int32
+ _errno int32
+ _reason [20]byte
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_size uint32
+ ss_flags int32
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type mcontextt struct {
+ __gregs [17]uint32
+ __fpu [4 + 8*32 + 4]byte // EABI
+ // __fpu [4+4*33+4]byte // not EABI
+ _mc_tlsbase uint32
+}
+
+type ucontextt struct {
+ uc_flags uint32
+ uc_link *ucontextt
+ uc_sigmask sigset
+ uc_stack stackt
+ uc_mcontext mcontextt
+ __uc_pad [2]int32
+}
+
+type keventt struct {
+ ident uint32
+ filter uint32
+ flags uint32
+ fflags uint32
+ data int64
+ udata *byte
+}
+
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go
+
+const (
+ _REG_R0 = 0x0
+ _REG_R1 = 0x1
+ _REG_R2 = 0x2
+ _REG_R3 = 0x3
+ _REG_R4 = 0x4
+ _REG_R5 = 0x5
+ _REG_R6 = 0x6
+ _REG_R7 = 0x7
+ _REG_R8 = 0x8
+ _REG_R9 = 0x9
+ _REG_R10 = 0xa
+ _REG_R11 = 0xb
+ _REG_R12 = 0xc
+ _REG_R13 = 0xd
+ _REG_R14 = 0xe
+ _REG_R15 = 0xf
+ _REG_CPSR = 0x10
+)
diff --git a/src/runtime/defs1_solaris_amd64.go b/src/runtime/defs1_solaris_amd64.go
new file mode 100644
index 000000000..3bb6f69bf
--- /dev/null
+++ b/src/runtime/defs1_solaris_amd64.go
@@ -0,0 +1,245 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_solaris.go defs_solaris_amd64.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EBADF = 0x9
+ _EFAULT = 0xe
+ _EAGAIN = 0xb
+ _ETIMEDOUT = 0x91
+ _EWOULDBLOCK = 0xb
+ _EINPROGRESS = 0x96
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x100
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x8
+ _SA_RESTART = 0x4
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x15
+ _SIGSTOP = 0x17
+ _SIGTSTP = 0x18
+ _SIGCONT = 0x19
+ _SIGCHLD = 0x12
+ _SIGTTIN = 0x1a
+ _SIGTTOU = 0x1b
+ _SIGIO = 0x16
+ _SIGXCPU = 0x1e
+ _SIGXFSZ = 0x1f
+ _SIGVTALRM = 0x1c
+ _SIGPROF = 0x1d
+ _SIGWINCH = 0x14
+ _SIGUSR1 = 0x10
+ _SIGUSR2 = 0x11
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ __SC_NPROCESSORS_ONLN = 0xf
+
+ _PTHREAD_CREATE_DETACHED = 0x40
+
+ _FORK_NOSIGCHLD = 0x1
+ _FORK_WAITPID = 0x2
+
+ _MAXHOSTNAMELEN = 0x100
+
+ _O_NONBLOCK = 0x80
+ _FD_CLOEXEC = 0x1
+ _F_GETFL = 0x3
+ _F_SETFL = 0x4
+ _F_SETFD = 0x2
+
+ _POLLIN = 0x1
+ _POLLOUT = 0x4
+ _POLLHUP = 0x10
+ _POLLERR = 0x8
+
+ _PORT_SOURCE_FD = 0x4
+)
+
+type semt struct {
+ sem_count uint32
+ sem_type uint16
+ sem_magic uint16
+ sem_pad1 [3]uint64
+ sem_pad2 [2]uint64
+}
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_size uint64
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type sigset struct {
+ __sigbits [4]uint32
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_size uint64
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type siginfo struct {
+ si_signo int32
+ si_code int32
+ si_errno int32
+ si_pad int32
+ __data [240]byte
+}
+
+type sigactiont struct {
+ sa_flags int32
+ pad_cgo_0 [4]byte
+ _funcptr [8]byte
+ sa_mask sigset
+}
+
+type fpregset struct {
+ fp_reg_set [528]byte
+}
+
+type mcontext struct {
+ gregs [28]int64
+ fpregs fpregset
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_sigmask sigset
+ uc_stack stackt
+ pad_cgo_0 [8]byte
+ uc_mcontext mcontext
+ uc_filler [5]int64
+ pad_cgo_1 [8]byte
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type portevent struct {
+ portev_events int32
+ portev_source uint16
+ portev_pad uint16
+ portev_object uint64
+ portev_user *byte
+}
+
+type pthread uint32
+type pthreadattr struct {
+ __pthread_attrp *byte
+}
+
+type stat struct {
+ st_dev uint64
+ st_ino uint64
+ st_mode uint32
+ st_nlink uint32
+ st_uid uint32
+ st_gid uint32
+ st_rdev uint64
+ st_size int64
+ st_atim timespec
+ st_mtim timespec
+ st_ctim timespec
+ st_blksize int32
+ pad_cgo_0 [4]byte
+ st_blocks int64
+ st_fstype [16]int8
+}
+
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_solaris.go defs_solaris_amd64.go
+
+const (
+ _REG_RDI = 0x8
+ _REG_RSI = 0x9
+ _REG_RDX = 0xc
+ _REG_RCX = 0xd
+ _REG_R8 = 0x7
+ _REG_R9 = 0x6
+ _REG_R10 = 0x5
+ _REG_R11 = 0x4
+ _REG_R12 = 0x3
+ _REG_R13 = 0x2
+ _REG_R14 = 0x1
+ _REG_R15 = 0x0
+ _REG_RBP = 0xa
+ _REG_RBX = 0xb
+ _REG_RAX = 0xe
+ _REG_GS = 0x17
+ _REG_FS = 0x16
+ _REG_ES = 0x18
+ _REG_DS = 0x19
+ _REG_TRAPNO = 0xf
+ _REG_ERR = 0x10
+ _REG_RIP = 0x11
+ _REG_CS = 0x12
+ _REG_RFLAGS = 0x13
+ _REG_RSP = 0x14
+ _REG_SS = 0x15
+)
diff --git a/src/runtime/defs_android_arm.h b/src/runtime/defs_android_arm.h
deleted file mode 100644
index 3611b3a10..000000000
--- a/src/runtime/defs_android_arm.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// TODO: Generate using cgo like defs_linux_{386,amd64}.h
-
-#include "defs_linux_arm.h"
diff --git a/src/runtime/defs_darwin_386.go b/src/runtime/defs_darwin_386.go
new file mode 100644
index 000000000..cf4812f9f
--- /dev/null
+++ b/src/runtime/defs_darwin_386.go
@@ -0,0 +1,382 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_darwin.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_FREE = 0x5
+
+ _MACH_MSG_TYPE_MOVE_RECEIVE = 0x10
+ _MACH_MSG_TYPE_MOVE_SEND = 0x11
+ _MACH_MSG_TYPE_MOVE_SEND_ONCE = 0x12
+ _MACH_MSG_TYPE_COPY_SEND = 0x13
+ _MACH_MSG_TYPE_MAKE_SEND = 0x14
+ _MACH_MSG_TYPE_MAKE_SEND_ONCE = 0x15
+ _MACH_MSG_TYPE_COPY_RECEIVE = 0x16
+
+ _MACH_MSG_PORT_DESCRIPTOR = 0x0
+ _MACH_MSG_OOL_DESCRIPTOR = 0x1
+ _MACH_MSG_OOL_PORTS_DESCRIPTOR = 0x2
+ _MACH_MSG_OOL_VOLATILE_DESCRIPTOR = 0x3
+
+ _MACH_MSGH_BITS_COMPLEX = 0x80000000
+
+ _MACH_SEND_MSG = 0x1
+ _MACH_RCV_MSG = 0x2
+ _MACH_RCV_LARGE = 0x4
+
+ _MACH_SEND_TIMEOUT = 0x10
+ _MACH_SEND_INTERRUPT = 0x40
+ _MACH_SEND_ALWAYS = 0x10000
+ _MACH_SEND_TRAILER = 0x20000
+ _MACH_RCV_TIMEOUT = 0x100
+ _MACH_RCV_NOTIFY = 0x200
+ _MACH_RCV_INTERRUPT = 0x400
+ _MACH_RCV_OVERWRITE = 0x1000
+
+ _NDR_PROTOCOL_2_0 = 0x0
+ _NDR_INT_BIG_ENDIAN = 0x0
+ _NDR_INT_LITTLE_ENDIAN = 0x1
+ _NDR_FLOAT_IEEE = 0x0
+ _NDR_CHAR_ASCII = 0x0
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+ _SA_USERTRAMP = 0x100
+ _SA_64REGSET = 0x200
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x7
+ _FPE_INTOVF = 0x8
+ _FPE_FLTDIV = 0x1
+ _FPE_FLTOVF = 0x2
+ _FPE_FLTUND = 0x3
+ _FPE_FLTRES = 0x4
+ _FPE_FLTINV = 0x5
+ _FPE_FLTSUB = 0x6
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0x40
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type machbody struct {
+ msgh_descriptor_count uint32
+}
+
+type machheader struct {
+ msgh_bits uint32
+ msgh_size uint32
+ msgh_remote_port uint32
+ msgh_local_port uint32
+ msgh_reserved uint32
+ msgh_id int32
+}
+
+type machndr struct {
+ mig_vers uint8
+ if_vers uint8
+ reserved1 uint8
+ mig_encoding uint8
+ int_rep uint8
+ char_rep uint8
+ float_rep uint8
+ reserved2 uint8
+}
+
+type machport struct {
+ name uint32
+ pad1 uint32
+ pad2 uint16
+ disposition uint8
+ _type uint8
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_size uintptr
+ ss_flags int32
+}
+
+type sigactiont struct {
+ __sigaction_u [4]byte
+ sa_tramp unsafe.Pointer
+ sa_mask uint32
+ sa_flags int32
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr *byte
+ si_value [4]byte
+ si_band int32
+ __pad [7]uint32
+}
+
+type timeval struct {
+ tv_sec int32
+ tv_usec int32
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type timespec struct {
+ tv_sec int32
+ tv_nsec int32
+}
+
+type fpcontrol struct {
+ pad_cgo_0 [2]byte
+}
+
+type fpstatus struct {
+ pad_cgo_0 [2]byte
+}
+
+type regmmst struct {
+ mmst_reg [10]int8
+ mmst_rsrv [6]int8
+}
+
+type regxmm struct {
+ xmm_reg [16]int8
+}
+
+type regs64 struct {
+ rax uint64
+ rbx uint64
+ rcx uint64
+ rdx uint64
+ rdi uint64
+ rsi uint64
+ rbp uint64
+ rsp uint64
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+ rip uint64
+ rflags uint64
+ cs uint64
+ fs uint64
+ gs uint64
+}
+
+type floatstate64 struct {
+ fpu_reserved [2]int32
+ fpu_fcw fpcontrol
+ fpu_fsw fpstatus
+ fpu_ftw uint8
+ fpu_rsrv1 uint8
+ fpu_fop uint16
+ fpu_ip uint32
+ fpu_cs uint16
+ fpu_rsrv2 uint16
+ fpu_dp uint32
+ fpu_ds uint16
+ fpu_rsrv3 uint16
+ fpu_mxcsr uint32
+ fpu_mxcsrmask uint32
+ fpu_stmm0 regmmst
+ fpu_stmm1 regmmst
+ fpu_stmm2 regmmst
+ fpu_stmm3 regmmst
+ fpu_stmm4 regmmst
+ fpu_stmm5 regmmst
+ fpu_stmm6 regmmst
+ fpu_stmm7 regmmst
+ fpu_xmm0 regxmm
+ fpu_xmm1 regxmm
+ fpu_xmm2 regxmm
+ fpu_xmm3 regxmm
+ fpu_xmm4 regxmm
+ fpu_xmm5 regxmm
+ fpu_xmm6 regxmm
+ fpu_xmm7 regxmm
+ fpu_xmm8 regxmm
+ fpu_xmm9 regxmm
+ fpu_xmm10 regxmm
+ fpu_xmm11 regxmm
+ fpu_xmm12 regxmm
+ fpu_xmm13 regxmm
+ fpu_xmm14 regxmm
+ fpu_xmm15 regxmm
+ fpu_rsrv4 [96]int8
+ fpu_reserved1 int32
+}
+
+type exceptionstate64 struct {
+ trapno uint16
+ cpu uint16
+ err uint32
+ faultvaddr uint64
+}
+
+type mcontext64 struct {
+ es exceptionstate64
+ ss regs64
+ fs floatstate64
+}
+
+type regs32 struct {
+ eax uint32
+ ebx uint32
+ ecx uint32
+ edx uint32
+ edi uint32
+ esi uint32
+ ebp uint32
+ esp uint32
+ ss uint32
+ eflags uint32
+ eip uint32
+ cs uint32
+ ds uint32
+ es uint32
+ fs uint32
+ gs uint32
+}
+
+type floatstate32 struct {
+ fpu_reserved [2]int32
+ fpu_fcw fpcontrol
+ fpu_fsw fpstatus
+ fpu_ftw uint8
+ fpu_rsrv1 uint8
+ fpu_fop uint16
+ fpu_ip uint32
+ fpu_cs uint16
+ fpu_rsrv2 uint16
+ fpu_dp uint32
+ fpu_ds uint16
+ fpu_rsrv3 uint16
+ fpu_mxcsr uint32
+ fpu_mxcsrmask uint32
+ fpu_stmm0 regmmst
+ fpu_stmm1 regmmst
+ fpu_stmm2 regmmst
+ fpu_stmm3 regmmst
+ fpu_stmm4 regmmst
+ fpu_stmm5 regmmst
+ fpu_stmm6 regmmst
+ fpu_stmm7 regmmst
+ fpu_xmm0 regxmm
+ fpu_xmm1 regxmm
+ fpu_xmm2 regxmm
+ fpu_xmm3 regxmm
+ fpu_xmm4 regxmm
+ fpu_xmm5 regxmm
+ fpu_xmm6 regxmm
+ fpu_xmm7 regxmm
+ fpu_rsrv4 [224]int8
+ fpu_reserved1 int32
+}
+
+type exceptionstate32 struct {
+ trapno uint16
+ cpu uint16
+ err uint32
+ faultvaddr uint32
+}
+
+type mcontext32 struct {
+ es exceptionstate32
+ ss regs32
+ fs floatstate32
+}
+
+type ucontext struct {
+ uc_onstack int32
+ uc_sigmask uint32
+ uc_stack stackt
+ uc_link *ucontext
+ uc_mcsize uint32
+ uc_mcontext *mcontext32
+}
+
+type keventt struct {
+ ident uint32
+ filter int16
+ flags uint16
+ fflags uint32
+ data int32
+ udata *byte
+}
diff --git a/src/runtime/defs_darwin_386.h b/src/runtime/defs_darwin_386.h
deleted file mode 100644
index 0e0b4fbf7..000000000
--- a/src/runtime/defs_darwin_386.h
+++ /dev/null
@@ -1,392 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_darwin.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_DONTNEED = 0x4,
- MADV_FREE = 0x5,
-
- MACH_MSG_TYPE_MOVE_RECEIVE = 0x10,
- MACH_MSG_TYPE_MOVE_SEND = 0x11,
- MACH_MSG_TYPE_MOVE_SEND_ONCE = 0x12,
- MACH_MSG_TYPE_COPY_SEND = 0x13,
- MACH_MSG_TYPE_MAKE_SEND = 0x14,
- MACH_MSG_TYPE_MAKE_SEND_ONCE = 0x15,
- MACH_MSG_TYPE_COPY_RECEIVE = 0x16,
-
- MACH_MSG_PORT_DESCRIPTOR = 0x0,
- MACH_MSG_OOL_DESCRIPTOR = 0x1,
- MACH_MSG_OOL_PORTS_DESCRIPTOR = 0x2,
- MACH_MSG_OOL_VOLATILE_DESCRIPTOR = 0x3,
-
- MACH_MSGH_BITS_COMPLEX = 0x80000000,
-
- MACH_SEND_MSG = 0x1,
- MACH_RCV_MSG = 0x2,
- MACH_RCV_LARGE = 0x4,
-
- MACH_SEND_TIMEOUT = 0x10,
- MACH_SEND_INTERRUPT = 0x40,
- MACH_SEND_ALWAYS = 0x10000,
- MACH_SEND_TRAILER = 0x20000,
- MACH_RCV_TIMEOUT = 0x100,
- MACH_RCV_NOTIFY = 0x200,
- MACH_RCV_INTERRUPT = 0x400,
- MACH_RCV_OVERWRITE = 0x1000,
-
- NDR_PROTOCOL_2_0 = 0x0,
- NDR_INT_BIG_ENDIAN = 0x0,
- NDR_INT_LITTLE_ENDIAN = 0x1,
- NDR_FLOAT_IEEE = 0x0,
- NDR_CHAR_ASCII = 0x0,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
- SA_USERTRAMP = 0x100,
- SA_64REGSET = 0x200,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x7,
- FPE_INTOVF = 0x8,
- FPE_FLTDIV = 0x1,
- FPE_FLTOVF = 0x2,
- FPE_FLTUND = 0x3,
- FPE_FLTRES = 0x4,
- FPE_FLTINV = 0x5,
- FPE_FLTSUB = 0x6,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0x40,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct MachBody MachBody;
-typedef struct MachHeader MachHeader;
-typedef struct MachNDR MachNDR;
-typedef struct MachPort MachPort;
-typedef struct StackT StackT;
-typedef struct SigactionT SigactionT;
-typedef struct Siginfo Siginfo;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct Timespec Timespec;
-typedef struct FPControl FPControl;
-typedef struct FPStatus FPStatus;
-typedef struct RegMMST RegMMST;
-typedef struct RegXMM RegXMM;
-typedef struct Regs64 Regs64;
-typedef struct FloatState64 FloatState64;
-typedef struct ExceptionState64 ExceptionState64;
-typedef struct Mcontext64 Mcontext64;
-typedef struct Regs32 Regs32;
-typedef struct FloatState32 FloatState32;
-typedef struct ExceptionState32 ExceptionState32;
-typedef struct Mcontext32 Mcontext32;
-typedef struct Ucontext Ucontext;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct MachBody {
- uint32 msgh_descriptor_count;
-};
-struct MachHeader {
- uint32 msgh_bits;
- uint32 msgh_size;
- uint32 msgh_remote_port;
- uint32 msgh_local_port;
- uint32 msgh_reserved;
- int32 msgh_id;
-};
-struct MachNDR {
- uint8 mig_vers;
- uint8 if_vers;
- uint8 reserved1;
- uint8 mig_encoding;
- uint8 int_rep;
- uint8 char_rep;
- uint8 float_rep;
- uint8 reserved2;
-};
-struct MachPort {
- uint32 name;
- uint32 pad1;
- uint16 pad2;
- uint8 disposition;
- uint8 type;
-};
-
-struct StackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-typedef byte Sighandler[4];
-
-struct SigactionT {
- byte __sigaction_u[4];
- void *sa_tramp;
- uint32 sa_mask;
- int32 sa_flags;
-};
-
-typedef byte Sigval[4];
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[4];
- int32 si_band;
- uint32 __pad[7];
-};
-struct Timeval {
- int32 tv_sec;
- int32 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-struct Timespec {
- int32 tv_sec;
- int32 tv_nsec;
-};
-
-struct FPControl {
- byte Pad_cgo_0[2];
-};
-struct FPStatus {
- byte Pad_cgo_0[2];
-};
-struct RegMMST {
- int8 mmst_reg[10];
- int8 mmst_rsrv[6];
-};
-struct RegXMM {
- int8 xmm_reg[16];
-};
-
-struct Regs64 {
- uint64 rax;
- uint64 rbx;
- uint64 rcx;
- uint64 rdx;
- uint64 rdi;
- uint64 rsi;
- uint64 rbp;
- uint64 rsp;
- uint64 r8;
- uint64 r9;
- uint64 r10;
- uint64 r11;
- uint64 r12;
- uint64 r13;
- uint64 r14;
- uint64 r15;
- uint64 rip;
- uint64 rflags;
- uint64 cs;
- uint64 fs;
- uint64 gs;
-};
-struct FloatState64 {
- int32 fpu_reserved[2];
- FPControl fpu_fcw;
- FPStatus fpu_fsw;
- uint8 fpu_ftw;
- uint8 fpu_rsrv1;
- uint16 fpu_fop;
- uint32 fpu_ip;
- uint16 fpu_cs;
- uint16 fpu_rsrv2;
- uint32 fpu_dp;
- uint16 fpu_ds;
- uint16 fpu_rsrv3;
- uint32 fpu_mxcsr;
- uint32 fpu_mxcsrmask;
- RegMMST fpu_stmm0;
- RegMMST fpu_stmm1;
- RegMMST fpu_stmm2;
- RegMMST fpu_stmm3;
- RegMMST fpu_stmm4;
- RegMMST fpu_stmm5;
- RegMMST fpu_stmm6;
- RegMMST fpu_stmm7;
- RegXMM fpu_xmm0;
- RegXMM fpu_xmm1;
- RegXMM fpu_xmm2;
- RegXMM fpu_xmm3;
- RegXMM fpu_xmm4;
- RegXMM fpu_xmm5;
- RegXMM fpu_xmm6;
- RegXMM fpu_xmm7;
- RegXMM fpu_xmm8;
- RegXMM fpu_xmm9;
- RegXMM fpu_xmm10;
- RegXMM fpu_xmm11;
- RegXMM fpu_xmm12;
- RegXMM fpu_xmm13;
- RegXMM fpu_xmm14;
- RegXMM fpu_xmm15;
- int8 fpu_rsrv4[96];
- int32 fpu_reserved1;
-};
-struct ExceptionState64 {
- uint16 trapno;
- uint16 cpu;
- uint32 err;
- uint64 faultvaddr;
-};
-struct Mcontext64 {
- ExceptionState64 es;
- Regs64 ss;
- FloatState64 fs;
-};
-
-struct Regs32 {
- uint32 eax;
- uint32 ebx;
- uint32 ecx;
- uint32 edx;
- uint32 edi;
- uint32 esi;
- uint32 ebp;
- uint32 esp;
- uint32 ss;
- uint32 eflags;
- uint32 eip;
- uint32 cs;
- uint32 ds;
- uint32 es;
- uint32 fs;
- uint32 gs;
-};
-struct FloatState32 {
- int32 fpu_reserved[2];
- FPControl fpu_fcw;
- FPStatus fpu_fsw;
- uint8 fpu_ftw;
- uint8 fpu_rsrv1;
- uint16 fpu_fop;
- uint32 fpu_ip;
- uint16 fpu_cs;
- uint16 fpu_rsrv2;
- uint32 fpu_dp;
- uint16 fpu_ds;
- uint16 fpu_rsrv3;
- uint32 fpu_mxcsr;
- uint32 fpu_mxcsrmask;
- RegMMST fpu_stmm0;
- RegMMST fpu_stmm1;
- RegMMST fpu_stmm2;
- RegMMST fpu_stmm3;
- RegMMST fpu_stmm4;
- RegMMST fpu_stmm5;
- RegMMST fpu_stmm6;
- RegMMST fpu_stmm7;
- RegXMM fpu_xmm0;
- RegXMM fpu_xmm1;
- RegXMM fpu_xmm2;
- RegXMM fpu_xmm3;
- RegXMM fpu_xmm4;
- RegXMM fpu_xmm5;
- RegXMM fpu_xmm6;
- RegXMM fpu_xmm7;
- int8 fpu_rsrv4[224];
- int32 fpu_reserved1;
-};
-struct ExceptionState32 {
- uint16 trapno;
- uint16 cpu;
- uint32 err;
- uint32 faultvaddr;
-};
-struct Mcontext32 {
- ExceptionState32 es;
- Regs32 ss;
- FloatState32 fs;
-};
-
-struct Ucontext {
- int32 uc_onstack;
- uint32 uc_sigmask;
- StackT uc_stack;
- Ucontext *uc_link;
- uint32 uc_mcsize;
- Mcontext32 *uc_mcontext;
-};
-
-struct KeventT {
- uint32 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int32 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_darwin_amd64.go b/src/runtime/defs_darwin_amd64.go
new file mode 100644
index 000000000..2cd4c0cd0
--- /dev/null
+++ b/src/runtime/defs_darwin_amd64.go
@@ -0,0 +1,385 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_darwin.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_FREE = 0x5
+
+ _MACH_MSG_TYPE_MOVE_RECEIVE = 0x10
+ _MACH_MSG_TYPE_MOVE_SEND = 0x11
+ _MACH_MSG_TYPE_MOVE_SEND_ONCE = 0x12
+ _MACH_MSG_TYPE_COPY_SEND = 0x13
+ _MACH_MSG_TYPE_MAKE_SEND = 0x14
+ _MACH_MSG_TYPE_MAKE_SEND_ONCE = 0x15
+ _MACH_MSG_TYPE_COPY_RECEIVE = 0x16
+
+ _MACH_MSG_PORT_DESCRIPTOR = 0x0
+ _MACH_MSG_OOL_DESCRIPTOR = 0x1
+ _MACH_MSG_OOL_PORTS_DESCRIPTOR = 0x2
+ _MACH_MSG_OOL_VOLATILE_DESCRIPTOR = 0x3
+
+ _MACH_MSGH_BITS_COMPLEX = 0x80000000
+
+ _MACH_SEND_MSG = 0x1
+ _MACH_RCV_MSG = 0x2
+ _MACH_RCV_LARGE = 0x4
+
+ _MACH_SEND_TIMEOUT = 0x10
+ _MACH_SEND_INTERRUPT = 0x40
+ _MACH_SEND_ALWAYS = 0x10000
+ _MACH_SEND_TRAILER = 0x20000
+ _MACH_RCV_TIMEOUT = 0x100
+ _MACH_RCV_NOTIFY = 0x200
+ _MACH_RCV_INTERRUPT = 0x400
+ _MACH_RCV_OVERWRITE = 0x1000
+
+ _NDR_PROTOCOL_2_0 = 0x0
+ _NDR_INT_BIG_ENDIAN = 0x0
+ _NDR_INT_LITTLE_ENDIAN = 0x1
+ _NDR_FLOAT_IEEE = 0x0
+ _NDR_CHAR_ASCII = 0x0
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+ _SA_USERTRAMP = 0x100
+ _SA_64REGSET = 0x200
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x7
+ _FPE_INTOVF = 0x8
+ _FPE_FLTDIV = 0x1
+ _FPE_FLTOVF = 0x2
+ _FPE_FLTUND = 0x3
+ _FPE_FLTRES = 0x4
+ _FPE_FLTINV = 0x5
+ _FPE_FLTSUB = 0x6
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0x40
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type machbody struct {
+ msgh_descriptor_count uint32
+}
+
+type machheader struct {
+ msgh_bits uint32
+ msgh_size uint32
+ msgh_remote_port uint32
+ msgh_local_port uint32
+ msgh_reserved uint32
+ msgh_id int32
+}
+
+type machndr struct {
+ mig_vers uint8
+ if_vers uint8
+ reserved1 uint8
+ mig_encoding uint8
+ int_rep uint8
+ char_rep uint8
+ float_rep uint8
+ reserved2 uint8
+}
+
+type machport struct {
+ name uint32
+ pad1 uint32
+ pad2 uint16
+ disposition uint8
+ _type uint8
+}
+
+type stackt struct {
+ ss_sp *byte
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type sigactiont struct {
+ __sigaction_u [8]byte
+ sa_tramp unsafe.Pointer
+ sa_mask uint32
+ sa_flags int32
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr *byte
+ si_value [8]byte
+ si_band int64
+ __pad [7]uint64
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+ pad_cgo_0 [4]byte
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+type fpcontrol struct {
+ pad_cgo_0 [2]byte
+}
+
+type fpstatus struct {
+ pad_cgo_0 [2]byte
+}
+
+type regmmst struct {
+ mmst_reg [10]int8
+ mmst_rsrv [6]int8
+}
+
+type regxmm struct {
+ xmm_reg [16]int8
+}
+
+type regs64 struct {
+ rax uint64
+ rbx uint64
+ rcx uint64
+ rdx uint64
+ rdi uint64
+ rsi uint64
+ rbp uint64
+ rsp uint64
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+ rip uint64
+ rflags uint64
+ cs uint64
+ fs uint64
+ gs uint64
+}
+
+type floatstate64 struct {
+ fpu_reserved [2]int32
+ fpu_fcw fpcontrol
+ fpu_fsw fpstatus
+ fpu_ftw uint8
+ fpu_rsrv1 uint8
+ fpu_fop uint16
+ fpu_ip uint32
+ fpu_cs uint16
+ fpu_rsrv2 uint16
+ fpu_dp uint32
+ fpu_ds uint16
+ fpu_rsrv3 uint16
+ fpu_mxcsr uint32
+ fpu_mxcsrmask uint32
+ fpu_stmm0 regmmst
+ fpu_stmm1 regmmst
+ fpu_stmm2 regmmst
+ fpu_stmm3 regmmst
+ fpu_stmm4 regmmst
+ fpu_stmm5 regmmst
+ fpu_stmm6 regmmst
+ fpu_stmm7 regmmst
+ fpu_xmm0 regxmm
+ fpu_xmm1 regxmm
+ fpu_xmm2 regxmm
+ fpu_xmm3 regxmm
+ fpu_xmm4 regxmm
+ fpu_xmm5 regxmm
+ fpu_xmm6 regxmm
+ fpu_xmm7 regxmm
+ fpu_xmm8 regxmm
+ fpu_xmm9 regxmm
+ fpu_xmm10 regxmm
+ fpu_xmm11 regxmm
+ fpu_xmm12 regxmm
+ fpu_xmm13 regxmm
+ fpu_xmm14 regxmm
+ fpu_xmm15 regxmm
+ fpu_rsrv4 [96]int8
+ fpu_reserved1 int32
+}
+
+type exceptionstate64 struct {
+ trapno uint16
+ cpu uint16
+ err uint32
+ faultvaddr uint64
+}
+
+type mcontext64 struct {
+ es exceptionstate64
+ ss regs64
+ fs floatstate64
+ pad_cgo_0 [4]byte
+}
+
+type regs32 struct {
+ eax uint32
+ ebx uint32
+ ecx uint32
+ edx uint32
+ edi uint32
+ esi uint32
+ ebp uint32
+ esp uint32
+ ss uint32
+ eflags uint32
+ eip uint32
+ cs uint32
+ ds uint32
+ es uint32
+ fs uint32
+ gs uint32
+}
+
+type floatstate32 struct {
+ fpu_reserved [2]int32
+ fpu_fcw fpcontrol
+ fpu_fsw fpstatus
+ fpu_ftw uint8
+ fpu_rsrv1 uint8
+ fpu_fop uint16
+ fpu_ip uint32
+ fpu_cs uint16
+ fpu_rsrv2 uint16
+ fpu_dp uint32
+ fpu_ds uint16
+ fpu_rsrv3 uint16
+ fpu_mxcsr uint32
+ fpu_mxcsrmask uint32
+ fpu_stmm0 regmmst
+ fpu_stmm1 regmmst
+ fpu_stmm2 regmmst
+ fpu_stmm3 regmmst
+ fpu_stmm4 regmmst
+ fpu_stmm5 regmmst
+ fpu_stmm6 regmmst
+ fpu_stmm7 regmmst
+ fpu_xmm0 regxmm
+ fpu_xmm1 regxmm
+ fpu_xmm2 regxmm
+ fpu_xmm3 regxmm
+ fpu_xmm4 regxmm
+ fpu_xmm5 regxmm
+ fpu_xmm6 regxmm
+ fpu_xmm7 regxmm
+ fpu_rsrv4 [224]int8
+ fpu_reserved1 int32
+}
+
+type exceptionstate32 struct {
+ trapno uint16
+ cpu uint16
+ err uint32
+ faultvaddr uint32
+}
+
+type mcontext32 struct {
+ es exceptionstate32
+ ss regs32
+ fs floatstate32
+}
+
+type ucontext struct {
+ uc_onstack int32
+ uc_sigmask uint32
+ uc_stack stackt
+ uc_link *ucontext
+ uc_mcsize uint64
+ uc_mcontext *mcontext64
+}
+
+type keventt struct {
+ ident uint64
+ filter int16
+ flags uint16
+ fflags uint32
+ data int64
+ udata *byte
+}
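
A note on the conversion pattern visible in the file added above: each platform keeps a tiny set_usec (and, where needed, set_sec/set_nsec) helper so portable runtime code can always pass an int32, while the per-platform method stores the value at whatever width that OS/arch uses for the field. Below is a minimal standalone sketch of that idea; the type names timevalDarwin and timevalBSD64 are invented for illustration only, the real unexported types are the ones defined in package runtime in this diff.

package main

import "fmt"

// Standalone mirrors of two timeval layouts from this diff, kept only to
// illustrate the setter pattern; the real (unexported) types live in package runtime.
type timevalDarwin struct { // darwin/amd64 layout: tv_usec is 32 bits
	tv_sec    int64
	tv_usec   int32
	pad_cgo_0 [4]byte
}

func (tv *timevalDarwin) set_usec(x int32) { tv.tv_usec = x }

type timevalBSD64 struct { // dragonfly/freebsd amd64 layout: tv_usec is 64 bits
	tv_sec  int64
	tv_usec int64
}

func (tv *timevalBSD64) set_usec(x int32) { tv.tv_usec = int64(x) }

func main() {
	var a timevalDarwin
	var b timevalBSD64
	// Portable callers always pass an int32; each platform's method picks the width.
	a.set_usec(250000)
	b.set_usec(250000)
	fmt.Println(a.tv_usec, b.tv_usec)
}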
diff --git a/src/runtime/defs_darwin_amd64.h b/src/runtime/defs_darwin_amd64.h
deleted file mode 100644
index 4bf83c1cb..000000000
--- a/src/runtime/defs_darwin_amd64.h
+++ /dev/null
@@ -1,395 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_darwin.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_DONTNEED = 0x4,
- MADV_FREE = 0x5,
-
- MACH_MSG_TYPE_MOVE_RECEIVE = 0x10,
- MACH_MSG_TYPE_MOVE_SEND = 0x11,
- MACH_MSG_TYPE_MOVE_SEND_ONCE = 0x12,
- MACH_MSG_TYPE_COPY_SEND = 0x13,
- MACH_MSG_TYPE_MAKE_SEND = 0x14,
- MACH_MSG_TYPE_MAKE_SEND_ONCE = 0x15,
- MACH_MSG_TYPE_COPY_RECEIVE = 0x16,
-
- MACH_MSG_PORT_DESCRIPTOR = 0x0,
- MACH_MSG_OOL_DESCRIPTOR = 0x1,
- MACH_MSG_OOL_PORTS_DESCRIPTOR = 0x2,
- MACH_MSG_OOL_VOLATILE_DESCRIPTOR = 0x3,
-
- MACH_MSGH_BITS_COMPLEX = 0x80000000,
-
- MACH_SEND_MSG = 0x1,
- MACH_RCV_MSG = 0x2,
- MACH_RCV_LARGE = 0x4,
-
- MACH_SEND_TIMEOUT = 0x10,
- MACH_SEND_INTERRUPT = 0x40,
- MACH_SEND_ALWAYS = 0x10000,
- MACH_SEND_TRAILER = 0x20000,
- MACH_RCV_TIMEOUT = 0x100,
- MACH_RCV_NOTIFY = 0x200,
- MACH_RCV_INTERRUPT = 0x400,
- MACH_RCV_OVERWRITE = 0x1000,
-
- NDR_PROTOCOL_2_0 = 0x0,
- NDR_INT_BIG_ENDIAN = 0x0,
- NDR_INT_LITTLE_ENDIAN = 0x1,
- NDR_FLOAT_IEEE = 0x0,
- NDR_CHAR_ASCII = 0x0,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
- SA_USERTRAMP = 0x100,
- SA_64REGSET = 0x200,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x7,
- FPE_INTOVF = 0x8,
- FPE_FLTDIV = 0x1,
- FPE_FLTOVF = 0x2,
- FPE_FLTUND = 0x3,
- FPE_FLTRES = 0x4,
- FPE_FLTINV = 0x5,
- FPE_FLTSUB = 0x6,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0x40,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct MachBody MachBody;
-typedef struct MachHeader MachHeader;
-typedef struct MachNDR MachNDR;
-typedef struct MachPort MachPort;
-typedef struct StackT StackT;
-typedef struct SigactionT SigactionT;
-typedef struct Siginfo Siginfo;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct Timespec Timespec;
-typedef struct FPControl FPControl;
-typedef struct FPStatus FPStatus;
-typedef struct RegMMST RegMMST;
-typedef struct RegXMM RegXMM;
-typedef struct Regs64 Regs64;
-typedef struct FloatState64 FloatState64;
-typedef struct ExceptionState64 ExceptionState64;
-typedef struct Mcontext64 Mcontext64;
-typedef struct Regs32 Regs32;
-typedef struct FloatState32 FloatState32;
-typedef struct ExceptionState32 ExceptionState32;
-typedef struct Mcontext32 Mcontext32;
-typedef struct Ucontext Ucontext;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct MachBody {
- uint32 msgh_descriptor_count;
-};
-struct MachHeader {
- uint32 msgh_bits;
- uint32 msgh_size;
- uint32 msgh_remote_port;
- uint32 msgh_local_port;
- uint32 msgh_reserved;
- int32 msgh_id;
-};
-struct MachNDR {
- uint8 mig_vers;
- uint8 if_vers;
- uint8 reserved1;
- uint8 mig_encoding;
- uint8 int_rep;
- uint8 char_rep;
- uint8 float_rep;
- uint8 reserved2;
-};
-struct MachPort {
- uint32 name;
- uint32 pad1;
- uint16 pad2;
- uint8 disposition;
- uint8 type;
-};
-
-struct StackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-typedef byte Sighandler[8];
-
-struct SigactionT {
- byte __sigaction_u[8];
- void *sa_tramp;
- uint32 sa_mask;
- int32 sa_flags;
-};
-
-typedef byte Sigval[8];
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[8];
- int64 si_band;
- uint64 __pad[7];
-};
-struct Timeval {
- int64 tv_sec;
- int32 tv_usec;
- byte Pad_cgo_0[4];
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-
-struct FPControl {
- byte Pad_cgo_0[2];
-};
-struct FPStatus {
- byte Pad_cgo_0[2];
-};
-struct RegMMST {
- int8 mmst_reg[10];
- int8 mmst_rsrv[6];
-};
-struct RegXMM {
- int8 xmm_reg[16];
-};
-
-struct Regs64 {
- uint64 rax;
- uint64 rbx;
- uint64 rcx;
- uint64 rdx;
- uint64 rdi;
- uint64 rsi;
- uint64 rbp;
- uint64 rsp;
- uint64 r8;
- uint64 r9;
- uint64 r10;
- uint64 r11;
- uint64 r12;
- uint64 r13;
- uint64 r14;
- uint64 r15;
- uint64 rip;
- uint64 rflags;
- uint64 cs;
- uint64 fs;
- uint64 gs;
-};
-struct FloatState64 {
- int32 fpu_reserved[2];
- FPControl fpu_fcw;
- FPStatus fpu_fsw;
- uint8 fpu_ftw;
- uint8 fpu_rsrv1;
- uint16 fpu_fop;
- uint32 fpu_ip;
- uint16 fpu_cs;
- uint16 fpu_rsrv2;
- uint32 fpu_dp;
- uint16 fpu_ds;
- uint16 fpu_rsrv3;
- uint32 fpu_mxcsr;
- uint32 fpu_mxcsrmask;
- RegMMST fpu_stmm0;
- RegMMST fpu_stmm1;
- RegMMST fpu_stmm2;
- RegMMST fpu_stmm3;
- RegMMST fpu_stmm4;
- RegMMST fpu_stmm5;
- RegMMST fpu_stmm6;
- RegMMST fpu_stmm7;
- RegXMM fpu_xmm0;
- RegXMM fpu_xmm1;
- RegXMM fpu_xmm2;
- RegXMM fpu_xmm3;
- RegXMM fpu_xmm4;
- RegXMM fpu_xmm5;
- RegXMM fpu_xmm6;
- RegXMM fpu_xmm7;
- RegXMM fpu_xmm8;
- RegXMM fpu_xmm9;
- RegXMM fpu_xmm10;
- RegXMM fpu_xmm11;
- RegXMM fpu_xmm12;
- RegXMM fpu_xmm13;
- RegXMM fpu_xmm14;
- RegXMM fpu_xmm15;
- int8 fpu_rsrv4[96];
- int32 fpu_reserved1;
-};
-struct ExceptionState64 {
- uint16 trapno;
- uint16 cpu;
- uint32 err;
- uint64 faultvaddr;
-};
-struct Mcontext64 {
- ExceptionState64 es;
- Regs64 ss;
- FloatState64 fs;
- byte Pad_cgo_0[4];
-};
-
-struct Regs32 {
- uint32 eax;
- uint32 ebx;
- uint32 ecx;
- uint32 edx;
- uint32 edi;
- uint32 esi;
- uint32 ebp;
- uint32 esp;
- uint32 ss;
- uint32 eflags;
- uint32 eip;
- uint32 cs;
- uint32 ds;
- uint32 es;
- uint32 fs;
- uint32 gs;
-};
-struct FloatState32 {
- int32 fpu_reserved[2];
- FPControl fpu_fcw;
- FPStatus fpu_fsw;
- uint8 fpu_ftw;
- uint8 fpu_rsrv1;
- uint16 fpu_fop;
- uint32 fpu_ip;
- uint16 fpu_cs;
- uint16 fpu_rsrv2;
- uint32 fpu_dp;
- uint16 fpu_ds;
- uint16 fpu_rsrv3;
- uint32 fpu_mxcsr;
- uint32 fpu_mxcsrmask;
- RegMMST fpu_stmm0;
- RegMMST fpu_stmm1;
- RegMMST fpu_stmm2;
- RegMMST fpu_stmm3;
- RegMMST fpu_stmm4;
- RegMMST fpu_stmm5;
- RegMMST fpu_stmm6;
- RegMMST fpu_stmm7;
- RegXMM fpu_xmm0;
- RegXMM fpu_xmm1;
- RegXMM fpu_xmm2;
- RegXMM fpu_xmm3;
- RegXMM fpu_xmm4;
- RegXMM fpu_xmm5;
- RegXMM fpu_xmm6;
- RegXMM fpu_xmm7;
- int8 fpu_rsrv4[224];
- int32 fpu_reserved1;
-};
-struct ExceptionState32 {
- uint16 trapno;
- uint16 cpu;
- uint32 err;
- uint32 faultvaddr;
-};
-struct Mcontext32 {
- ExceptionState32 es;
- Regs32 ss;
- FloatState32 fs;
-};
-
-struct Ucontext {
- int32 uc_onstack;
- uint32 uc_sigmask;
- StackT uc_stack;
- Ucontext *uc_link;
- uint64 uc_mcsize;
- Mcontext64 *uc_mcontext;
-};
-
-struct KeventT {
- uint64 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_dragonfly_386.go b/src/runtime/defs_dragonfly_386.go
new file mode 100644
index 000000000..1768dbac4
--- /dev/null
+++ b/src/runtime/defs_dragonfly_386.go
@@ -0,0 +1,190 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_dragonfly.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EBUSY = 0x10
+ _EAGAIN = 0x23
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x2
+ _FPE_INTOVF = 0x1
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type rtprio struct {
+ _type uint16
+ prio uint16
+}
+
+type lwpparams struct {
+ _type unsafe.Pointer
+ arg *byte
+ stack *byte
+ tid1 *int32
+ tid2 *int32
+}
+
+type sigaltstackt struct {
+ ss_sp *int8
+ ss_size uint32
+ ss_flags int32
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type stackt struct {
+ ss_sp *int8
+ ss_size uint32
+ ss_flags int32
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr *byte
+ si_value [4]byte
+ si_band int32
+ __spare__ [7]int32
+}
+
+type mcontext struct {
+ mc_onstack int32
+ mc_gs int32
+ mc_fs int32
+ mc_es int32
+ mc_ds int32
+ mc_edi int32
+ mc_esi int32
+ mc_ebp int32
+ mc_isp int32
+ mc_ebx int32
+ mc_edx int32
+ mc_ecx int32
+ mc_eax int32
+ mc_xflags int32
+ mc_trapno int32
+ mc_err int32
+ mc_eip int32
+ mc_cs int32
+ mc_eflags int32
+ mc_esp int32
+ mc_ss int32
+ mc_len int32
+ mc_fpformat int32
+ mc_ownedfp int32
+ mc_fpregs [128]int32
+ __spare__ [16]int32
+}
+
+type ucontext struct {
+ uc_sigmask sigset
+ uc_mcontext mcontext
+ uc_link *ucontext
+ uc_stack stackt
+ __spare__ [8]int32
+}
+
+type timespec struct {
+ tv_sec int32
+ tv_nsec int32
+}
+
+type timeval struct {
+ tv_sec int32
+ tv_usec int32
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint32
+ filter int16
+ flags uint16
+ fflags uint32
+ data int32
+ udata *byte
+}
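
The sigset type added above is a plain array of four 32-bit words, so signal-mask manipulation is done with explicit bit arithmetic rather than a libc sigaddset. A small self-contained sketch of that pattern follows; the package main wrapper and the printed output are only for illustration, and the constant value is taken from the block above.

package main

import "fmt"

const _SIGPROF = 0x1b // value taken from the constant block above

// Standalone mirror of the sigset layout above (illustration only).
type sigset struct {
	__bits [4]uint32
}

func main() {
	var mask sigset
	// BSD-style sigaddset: signal numbers are 1-based, 32 bits per word.
	mask.__bits[(_SIGPROF-1)/32] |= 1 << uint32((_SIGPROF-1)&31)
	fmt.Printf("%#x\n", mask.__bits) // [0x4000000 0x0 0x0 0x0]
}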
diff --git a/src/runtime/defs_dragonfly_386.h b/src/runtime/defs_dragonfly_386.h
deleted file mode 100644
index f86b9c6b9..000000000
--- a/src/runtime/defs_dragonfly_386.h
+++ /dev/null
@@ -1,198 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_dragonfly.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
- EBUSY = 0x10,
- EAGAIN = 0x23,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x5,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x2,
- FPE_INTOVF = 0x1,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct Rtprio Rtprio;
-typedef struct Lwpparams Lwpparams;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct StackT StackT;
-typedef struct Siginfo Siginfo;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct Rtprio {
- uint16 type;
- uint16 prio;
-};
-struct Lwpparams {
- void *func;
- byte *arg;
- byte *stack;
- int32 *tid1;
- int32 *tid2;
-};
-struct SigaltstackT {
- int8 *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct StackT {
- int8 *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[4];
- int32 si_band;
- int32 __spare__[7];
-};
-
-struct Mcontext {
- int32 mc_onstack;
- int32 mc_gs;
- int32 mc_fs;
- int32 mc_es;
- int32 mc_ds;
- int32 mc_edi;
- int32 mc_esi;
- int32 mc_ebp;
- int32 mc_isp;
- int32 mc_ebx;
- int32 mc_edx;
- int32 mc_ecx;
- int32 mc_eax;
- int32 mc_xflags;
- int32 mc_trapno;
- int32 mc_err;
- int32 mc_eip;
- int32 mc_cs;
- int32 mc_eflags;
- int32 mc_esp;
- int32 mc_ss;
- int32 mc_len;
- int32 mc_fpformat;
- int32 mc_ownedfp;
- int32 mc_fpregs[128];
- int32 __spare__[16];
-};
-struct Ucontext {
- Sigset uc_sigmask;
- Mcontext uc_mcontext;
- Ucontext *uc_link;
- StackT uc_stack;
- int32 __spare__[8];
-};
-
-struct Timespec {
- int32 tv_sec;
- int32 tv_nsec;
-};
-struct Timeval {
- int32 tv_sec;
- int32 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint32 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int32 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_dragonfly_amd64.go b/src/runtime/defs_dragonfly_amd64.go
new file mode 100644
index 000000000..7e9597705
--- /dev/null
+++ b/src/runtime/defs_dragonfly_amd64.go
@@ -0,0 +1,208 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_dragonfly.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EBUSY = 0x10
+ _EAGAIN = 0x23
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x2
+ _FPE_INTOVF = 0x1
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type rtprio struct {
+ _type uint16
+ prio uint16
+}
+
+type lwpparams struct {
+ start_func uintptr
+ arg unsafe.Pointer
+ stack uintptr
+ tid1 unsafe.Pointer // *int32
+ tid2 unsafe.Pointer // *int32
+}
+
+type sigaltstackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr uint64
+ si_value [8]byte
+ si_band int64
+ __spare__ [7]int32
+ pad_cgo_0 [4]byte
+}
+
+type mcontext struct {
+ mc_onstack uint64
+ mc_rdi uint64
+ mc_rsi uint64
+ mc_rdx uint64
+ mc_rcx uint64
+ mc_r8 uint64
+ mc_r9 uint64
+ mc_rax uint64
+ mc_rbx uint64
+ mc_rbp uint64
+ mc_r10 uint64
+ mc_r11 uint64
+ mc_r12 uint64
+ mc_r13 uint64
+ mc_r14 uint64
+ mc_r15 uint64
+ mc_xflags uint64
+ mc_trapno uint64
+ mc_addr uint64
+ mc_flags uint64
+ mc_err uint64
+ mc_rip uint64
+ mc_cs uint64
+ mc_rflags uint64
+ mc_rsp uint64
+ mc_ss uint64
+ mc_len uint32
+ mc_fpformat uint32
+ mc_ownedfp uint32
+ mc_reserved uint32
+ mc_unused [8]uint32
+ mc_fpregs [256]int32
+}
+
+type ucontext struct {
+ uc_sigmask sigset
+ pad_cgo_0 [48]byte
+ uc_mcontext mcontext
+ uc_link *ucontext
+ uc_stack stackt
+ __spare__ [8]int32
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint64
+ filter int16
+ flags uint16
+ fflags uint32
+ data int64
+ udata *byte
+}
diff --git a/src/runtime/defs_dragonfly_amd64.h b/src/runtime/defs_dragonfly_amd64.h
deleted file mode 100644
index 671555241..000000000
--- a/src/runtime/defs_dragonfly_amd64.h
+++ /dev/null
@@ -1,208 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_dragonfly.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
- EBUSY = 0x10,
- EAGAIN = 0x23,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x5,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x2,
- FPE_INTOVF = 0x1,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct Rtprio Rtprio;
-typedef struct Lwpparams Lwpparams;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct StackT StackT;
-typedef struct Siginfo Siginfo;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct Rtprio {
- uint16 type;
- uint16 prio;
-};
-struct Lwpparams {
- void *func;
- byte *arg;
- byte *stack;
- int32 *tid1;
- int32 *tid2;
-};
-struct SigaltstackT {
- int8 *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct StackT {
- int8 *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[8];
- int64 si_band;
- int32 __spare__[7];
- byte Pad_cgo_0[4];
-};
-
-struct Mcontext {
- int64 mc_onstack;
- int64 mc_rdi;
- int64 mc_rsi;
- int64 mc_rdx;
- int64 mc_rcx;
- int64 mc_r8;
- int64 mc_r9;
- int64 mc_rax;
- int64 mc_rbx;
- int64 mc_rbp;
- int64 mc_r10;
- int64 mc_r11;
- int64 mc_r12;
- int64 mc_r13;
- int64 mc_r14;
- int64 mc_r15;
- int64 mc_xflags;
- int64 mc_trapno;
- int64 mc_addr;
- int64 mc_flags;
- int64 mc_err;
- int64 mc_rip;
- int64 mc_cs;
- int64 mc_rflags;
- int64 mc_rsp;
- int64 mc_ss;
- uint32 mc_len;
- uint32 mc_fpformat;
- uint32 mc_ownedfp;
- uint32 mc_reserved;
- uint32 mc_unused[8];
- int32 mc_fpregs[256];
-};
-struct Ucontext {
- Sigset uc_sigmask;
- byte Pad_cgo_0[48];
- Mcontext uc_mcontext;
- Ucontext *uc_link;
- StackT uc_stack;
- int32 __spare__[8];
-};
-
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int64 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint64 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_freebsd_386.go b/src/runtime/defs_freebsd_386.go
new file mode 100644
index 000000000..2cb3a8fdb
--- /dev/null
+++ b/src/runtime/defs_freebsd_386.go
@@ -0,0 +1,213 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_freebsd.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _UMTX_OP_WAIT_UINT = 0xb
+ _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
+ _UMTX_OP_WAKE = 0x3
+ _UMTX_OP_WAKE_PRIVATE = 0x10
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x2
+ _FPE_INTOVF = 0x1
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0x40
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type rtprio struct {
+ _type uint16
+ prio uint16
+}
+
+type thrparam struct {
+ start_func uintptr
+ arg unsafe.Pointer
+ stack_base uintptr
+ stack_size uintptr
+ tls_base unsafe.Pointer
+ tls_size uintptr
+ child_tid unsafe.Pointer // *int32
+ parent_tid *int32
+ flags int32
+ rtp *rtprio
+ spare [3]uintptr
+}
+
+type sigaltstackt struct {
+ ss_sp *int8
+ ss_size uint32
+ ss_flags int32
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr uintptr
+ si_value [4]byte
+ _reason [32]byte
+}
+
+type mcontext struct {
+ mc_onstack uint32
+ mc_gs uint32
+ mc_fs uint32
+ mc_es uint32
+ mc_ds uint32
+ mc_edi uint32
+ mc_esi uint32
+ mc_ebp uint32
+ mc_isp uint32
+ mc_ebx uint32
+ mc_edx uint32
+ mc_ecx uint32
+ mc_eax uint32
+ mc_trapno uint32
+ mc_err uint32
+ mc_eip uint32
+ mc_cs uint32
+ mc_eflags uint32
+ mc_esp uint32
+ mc_ss uint32
+ mc_len uint32
+ mc_fpformat uint32
+ mc_ownedfp uint32
+ mc_flags uint32
+ mc_fpstate [128]uint32
+ mc_fsbase uint32
+ mc_gsbase uint32
+ mc_xfpustate uint32
+ mc_xfpustate_len uint32
+ mc_spare2 [4]uint32
+}
+
+type ucontext struct {
+ uc_sigmask sigset
+ uc_mcontext mcontext
+ uc_link *ucontext
+ uc_stack stackt
+ uc_flags int32
+ __spare__ [4]int32
+ pad_cgo_0 [12]byte
+}
+
+type timespec struct {
+ tv_sec int32
+ tv_nsec int32
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = x
+}
+
+type timeval struct {
+ tv_sec int32
+ tv_usec int32
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint32
+ filter int16
+ flags uint16
+ fflags uint32
+ data int32
+ udata *byte
+}
diff --git a/src/runtime/defs_freebsd_386.h b/src/runtime/defs_freebsd_386.h
deleted file mode 100644
index 156dccba4..000000000
--- a/src/runtime/defs_freebsd_386.h
+++ /dev/null
@@ -1,213 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_freebsd.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x5,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- UMTX_OP_WAIT_UINT = 0xb,
- UMTX_OP_WAIT_UINT_PRIVATE = 0xf,
- UMTX_OP_WAKE = 0x3,
- UMTX_OP_WAKE_PRIVATE = 0x10,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x2,
- FPE_INTOVF = 0x1,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0x40,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct Rtprio Rtprio;
-typedef struct ThrParam ThrParam;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct StackT StackT;
-typedef struct Siginfo Siginfo;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct Rtprio {
- uint16 type;
- uint16 prio;
-};
-struct ThrParam {
- void *start_func;
- byte *arg;
- int8 *stack_base;
- uint32 stack_size;
- int8 *tls_base;
- uint32 tls_size;
- int32 *child_tid;
- int32 *parent_tid;
- int32 flags;
- Rtprio *rtp;
- void *spare[3];
-};
-struct SigaltstackT {
- int8 *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct StackT {
- int8 *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[4];
- byte _reason[32];
-};
-
-struct Mcontext {
- int32 mc_onstack;
- int32 mc_gs;
- int32 mc_fs;
- int32 mc_es;
- int32 mc_ds;
- int32 mc_edi;
- int32 mc_esi;
- int32 mc_ebp;
- int32 mc_isp;
- int32 mc_ebx;
- int32 mc_edx;
- int32 mc_ecx;
- int32 mc_eax;
- int32 mc_trapno;
- int32 mc_err;
- int32 mc_eip;
- int32 mc_cs;
- int32 mc_eflags;
- int32 mc_esp;
- int32 mc_ss;
- int32 mc_len;
- int32 mc_fpformat;
- int32 mc_ownedfp;
- int32 mc_flags;
- int32 mc_fpstate[128];
- int32 mc_fsbase;
- int32 mc_gsbase;
- int32 mc_xfpustate;
- int32 mc_xfpustate_len;
- int32 mc_spare2[4];
-};
-struct Ucontext {
- Sigset uc_sigmask;
- Mcontext uc_mcontext;
- Ucontext *uc_link;
- StackT uc_stack;
- int32 uc_flags;
- int32 __spare__[4];
- byte Pad_cgo_0[12];
-};
-
-struct Timespec {
- int32 tv_sec;
- int32 tv_nsec;
-};
-struct Timeval {
- int32 tv_sec;
- int32 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint32 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int32 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_freebsd_amd64.go b/src/runtime/defs_freebsd_amd64.go
new file mode 100644
index 000000000..a2646fb24
--- /dev/null
+++ b/src/runtime/defs_freebsd_amd64.go
@@ -0,0 +1,224 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_freebsd.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _UMTX_OP_WAIT_UINT = 0xb
+ _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
+ _UMTX_OP_WAKE = 0x3
+ _UMTX_OP_WAKE_PRIVATE = 0x10
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x2
+ _FPE_INTOVF = 0x1
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0x40
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type rtprio struct {
+ _type uint16
+ prio uint16
+}
+
+type thrparam struct {
+ start_func uintptr
+ arg unsafe.Pointer
+ stack_base uintptr
+ stack_size uintptr
+ tls_base unsafe.Pointer
+ tls_size uintptr
+ child_tid unsafe.Pointer // *int64
+ parent_tid *int64
+ flags int32
+ pad_cgo_0 [4]byte
+ rtp *rtprio
+ spare [3]uintptr
+}
+
+type sigaltstackt struct {
+ ss_sp *int8
+ ss_size uint64
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr uint64
+ si_value [8]byte
+ _reason [40]byte
+}
+
+type mcontext struct {
+ mc_onstack uint64
+ mc_rdi uint64
+ mc_rsi uint64
+ mc_rdx uint64
+ mc_rcx uint64
+ mc_r8 uint64
+ mc_r9 uint64
+ mc_rax uint64
+ mc_rbx uint64
+ mc_rbp uint64
+ mc_r10 uint64
+ mc_r11 uint64
+ mc_r12 uint64
+ mc_r13 uint64
+ mc_r14 uint64
+ mc_r15 uint64
+ mc_trapno uint32
+ mc_fs uint16
+ mc_gs uint16
+ mc_addr uint64
+ mc_flags uint32
+ mc_es uint16
+ mc_ds uint16
+ mc_err uint64
+ mc_rip uint64
+ mc_cs uint64
+ mc_rflags uint64
+ mc_rsp uint64
+ mc_ss uint64
+ mc_len uint64
+ mc_fpformat uint64
+ mc_ownedfp uint64
+ mc_fpstate [64]uint64
+ mc_fsbase uint64
+ mc_gsbase uint64
+ mc_xfpustate uint64
+ mc_xfpustate_len uint64
+ mc_spare [4]uint64
+}
+
+type ucontext struct {
+ uc_sigmask sigset
+ uc_mcontext mcontext
+ uc_link *ucontext
+ uc_stack stackt
+ uc_flags int32
+ __spare__ [4]int32
+ pad_cgo_0 [12]byte
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint64
+ filter int16
+ flags uint16
+ fflags uint32
+ data int64
+ udata *byte
+}
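
The keventt layout above is what kqueue-based polling fills in: ident carries the file descriptor, filter selects read or write readiness, flags combines values such as _EV_ADD and _EV_CLEAR, and udata carries an opaque pointer back to the waiter. A rough self-contained sketch of building such an event is shown below; buildKevent and the printed fields are illustrative assumptions, not the runtime's actual netpoll code.

package main

import (
	"fmt"
	"unsafe"
)

// Standalone mirror of the 64-bit keventt layout above (illustration only).
type keventt struct {
	ident  uint64
	filter int16
	flags  uint16
	fflags uint32
	data   int64
	udata  *byte
}

const (
	_EV_ADD      = 0x1
	_EV_CLEAR    = 0x20
	_EVFILT_READ = -0x1
)

// buildKevent fills in an event that asks for edge-triggered read readiness
// on fd and smuggles ctx back through the opaque udata field.
func buildKevent(fd int32, ctx *int) keventt {
	return keventt{
		ident:  uint64(fd),
		filter: _EVFILT_READ,
		flags:  _EV_ADD | _EV_CLEAR,
		udata:  (*byte)(unsafe.Pointer(ctx)),
	}
}

func main() {
	x := 7
	ev := buildKevent(3, &x)
	fmt.Println(ev.ident, ev.filter, ev.flags, *(*int)(unsafe.Pointer(ev.udata)))
}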
diff --git a/src/runtime/defs_freebsd_amd64.h b/src/runtime/defs_freebsd_amd64.h
deleted file mode 100644
index 4ba8956a2..000000000
--- a/src/runtime/defs_freebsd_amd64.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_freebsd.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x5,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- UMTX_OP_WAIT_UINT = 0xb,
- UMTX_OP_WAIT_UINT_PRIVATE = 0xf,
- UMTX_OP_WAKE = 0x3,
- UMTX_OP_WAKE_PRIVATE = 0x10,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x2,
- FPE_INTOVF = 0x1,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0x40,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct Rtprio Rtprio;
-typedef struct ThrParam ThrParam;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct StackT StackT;
-typedef struct Siginfo Siginfo;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct Rtprio {
- uint16 type;
- uint16 prio;
-};
-struct ThrParam {
- void *start_func;
- byte *arg;
- int8 *stack_base;
- uint64 stack_size;
- int8 *tls_base;
- uint64 tls_size;
- int64 *child_tid;
- int64 *parent_tid;
- int32 flags;
- byte Pad_cgo_0[4];
- Rtprio *rtp;
- void *spare[3];
-};
-struct SigaltstackT {
- int8 *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct StackT {
- int8 *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[8];
- byte _reason[40];
-};
-
-struct Mcontext {
- int64 mc_onstack;
- int64 mc_rdi;
- int64 mc_rsi;
- int64 mc_rdx;
- int64 mc_rcx;
- int64 mc_r8;
- int64 mc_r9;
- int64 mc_rax;
- int64 mc_rbx;
- int64 mc_rbp;
- int64 mc_r10;
- int64 mc_r11;
- int64 mc_r12;
- int64 mc_r13;
- int64 mc_r14;
- int64 mc_r15;
- uint32 mc_trapno;
- uint16 mc_fs;
- uint16 mc_gs;
- int64 mc_addr;
- uint32 mc_flags;
- uint16 mc_es;
- uint16 mc_ds;
- int64 mc_err;
- int64 mc_rip;
- int64 mc_cs;
- int64 mc_rflags;
- int64 mc_rsp;
- int64 mc_ss;
- int64 mc_len;
- int64 mc_fpformat;
- int64 mc_ownedfp;
- int64 mc_fpstate[64];
- int64 mc_fsbase;
- int64 mc_gsbase;
- int64 mc_xfpustate;
- int64 mc_xfpustate_len;
- int64 mc_spare[4];
-};
-struct Ucontext {
- Sigset uc_sigmask;
- Mcontext uc_mcontext;
- Ucontext *uc_link;
- StackT uc_stack;
- int32 uc_flags;
- int32 __spare__[4];
- byte Pad_cgo_0[12];
-};
-
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int64 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint64 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_freebsd_arm.go b/src/runtime/defs_freebsd_arm.go
new file mode 100644
index 000000000..e86ce45b4
--- /dev/null
+++ b/src/runtime/defs_freebsd_arm.go
@@ -0,0 +1,186 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_freebsd.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _UMTX_OP_WAIT_UINT = 0xb
+ _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
+ _UMTX_OP_WAKE = 0x3
+ _UMTX_OP_WAKE_PRIVATE = 0x10
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x2
+ _FPE_INTOVF = 0x1
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0x40
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type rtprio struct {
+ _type uint16
+ prio uint16
+}
+
+type thrparam struct {
+ start_func uintptr
+ arg unsafe.Pointer
+ stack_base uintptr
+ stack_size uintptr
+ tls_base unsafe.Pointer
+ tls_size uintptr
+ child_tid unsafe.Pointer // *int32
+ parent_tid *int32
+ flags int32
+ rtp *rtprio
+ spare [3]uintptr
+}
+
+type sigaltstackt struct {
+ ss_sp *uint8
+ ss_size uint32
+ ss_flags int32
+}
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr uintptr
+ si_value [4]byte
+ _reason [32]byte
+}
+
+type mcontext struct {
+ __gregs [17]uint32
+ __fpu [140]byte
+}
+
+type ucontext struct {
+ uc_sigmask sigset
+ uc_mcontext mcontext
+ uc_link *ucontext
+ uc_stack stackt
+ uc_flags int32
+ __spare__ [4]int32
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+ pad_cgo_0 [4]byte
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+ pad_cgo_0 [4]byte
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint32
+ filter int16
+ flags uint16
+ fflags uint32
+ data int32
+ udata *byte
+}
diff --git a/src/runtime/defs_freebsd_arm.h b/src/runtime/defs_freebsd_arm.h
deleted file mode 100644
index 17deba68d..000000000
--- a/src/runtime/defs_freebsd_arm.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_freebsd.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x5,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- UMTX_OP_WAIT_UINT = 0xb,
- UMTX_OP_WAIT_UINT_PRIVATE = 0xf,
- UMTX_OP_WAKE = 0x3,
- UMTX_OP_WAKE_PRIVATE = 0x10,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x2,
- FPE_INTOVF = 0x1,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0x40,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct Rtprio Rtprio;
-typedef struct ThrParam ThrParam;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct StackT StackT;
-typedef struct Siginfo Siginfo;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct Rtprio {
- uint16 type;
- uint16 prio;
-};
-struct ThrParam {
- void *start_func;
- byte *arg;
- uint8 *stack_base;
- uint32 stack_size;
- uint8 *tls_base;
- uint32 tls_size;
- int32 *child_tid;
- int32 *parent_tid;
- int32 flags;
- Rtprio *rtp;
- void *spare[3];
-};
-struct SigaltstackT {
- uint8 *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct StackT {
- uint8 *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- int32 si_pid;
- uint32 si_uid;
- int32 si_status;
- byte *si_addr;
- byte si_value[4];
- byte _reason[32];
-};
-
-struct Mcontext {
- uint32 __gregs[17];
- byte __fpu[140];
-};
-struct Ucontext {
- Sigset uc_sigmask;
- Mcontext uc_mcontext;
- Ucontext *uc_link;
- StackT uc_stack;
- int32 uc_flags;
- int32 __spare__[4];
-};
-
-struct Timespec {
- int64 tv_sec;
- int32 tv_nsec;
- byte Pad_cgo_0[4];
-};
-struct Timeval {
- int64 tv_sec;
- int32 tv_usec;
- byte Pad_cgo_0[4];
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint32 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int32 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go
new file mode 100644
index 000000000..a468f60d9
--- /dev/null
+++ b/src/runtime/defs_linux_386.go
@@ -0,0 +1,217 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs2_linux.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_RESTORER = 0x4000000
+ _SA_SIGINFO = 0x4
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _O_RDONLY = 0x0
+ _O_CLOEXEC = 0x80000
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+type fpreg struct {
+ significand [4]uint16
+ exponent uint16
+}
+
+type fpxreg struct {
+ significand [4]uint16
+ exponent uint16
+ padding [3]uint16
+}
+
+type xmmreg struct {
+ element [4]uint32
+}
+
+type fpstate struct {
+ cw uint32
+ sw uint32
+ tag uint32
+ ipoff uint32
+ cssel uint32
+ dataoff uint32
+ datasel uint32
+ _st [8]fpreg
+ status uint16
+ magic uint16
+ _fxsr_env [6]uint32
+ mxcsr uint32
+ reserved uint32
+ _fxsr_st [8]fpxreg
+ _xmm [8]xmmreg
+ padding1 [44]uint32
+ anon0 [48]byte
+}
+
+type timespec struct {
+ tv_sec int32
+ tv_nsec int32
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = x
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = x
+}
+
+type timeval struct {
+ tv_sec int32
+ tv_usec int32
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint32
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ _sifields [116]byte
+}
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_flags int32
+ ss_size uintptr
+}
+
+type sigcontext struct {
+ gs uint16
+ __gsh uint16
+ fs uint16
+ __fsh uint16
+ es uint16
+ __esh uint16
+ ds uint16
+ __dsh uint16
+ edi uint32
+ esi uint32
+ ebp uint32
+ esp uint32
+ ebx uint32
+ edx uint32
+ ecx uint32
+ eax uint32
+ trapno uint32
+ err uint32
+ eip uint32
+ cs uint16
+ __csh uint16
+ eflags uint32
+ esp_at_signal uint32
+ ss uint16
+ __ssh uint16
+ fpstate *fpstate
+ oldmask uint32
+ cr2 uint32
+}
+
+type ucontext struct {
+ uc_flags uint32
+ uc_link *ucontext
+ uc_stack sigaltstackt
+ uc_mcontext sigcontext
+ uc_sigmask uint32
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type epollevent struct {
+ events uint32
+ data [8]byte // to match amd64
+}
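
The epollevent added above deliberately declares data as [8]byte rather than a union: the field comment notes it is sized to match the amd64 layout, and callers that want to stash a pointer there go through unsafe.Pointer. A small self-contained sketch of that round trip follows; the package main wrapper and the int payload are illustrative, though the runtime's netpoll code uses the field for a pointer in the same spirit.

package main

import (
	"fmt"
	"unsafe"
)

// Standalone mirror of the epollevent layout above (illustration only).
type epollevent struct {
	events uint32
	data   [8]byte // opaque; wide enough for a pointer on both 386 and amd64
}

func main() {
	payload := 42
	var ev epollevent
	// Store a pointer into the opaque data field...
	*(**int)(unsafe.Pointer(&ev.data)) = &payload
	// ...and read it back out, as one would after a (hypothetical) epoll_wait.
	got := *(**int)(unsafe.Pointer(&ev.data))
	fmt.Println(*got) // 42
}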
diff --git a/src/runtime/defs_linux_386.h b/src/runtime/defs_linux_386.h
deleted file mode 100644
index 24a05d862..000000000
--- a/src/runtime/defs_linux_386.h
+++ /dev/null
@@ -1,211 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs2_linux.go
-
-
-enum {
- EINTR = 0x4,
- EAGAIN = 0xb,
- ENOMEM = 0xc,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x20,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_DONTNEED = 0x4,
-
- SA_RESTART = 0x10000000,
- SA_ONSTACK = 0x8000000,
- SA_RESTORER = 0x4000000,
- SA_SIGINFO = 0x4,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGBUS = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGUSR1 = 0xa,
- SIGSEGV = 0xb,
- SIGUSR2 = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGSTKFLT = 0x10,
- SIGCHLD = 0x11,
- SIGCONT = 0x12,
- SIGSTOP = 0x13,
- SIGTSTP = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGURG = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGIO = 0x1d,
- SIGPWR = 0x1e,
- SIGSYS = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- O_RDONLY = 0x0,
- O_CLOEXEC = 0x80000,
-
- EPOLLIN = 0x1,
- EPOLLOUT = 0x4,
- EPOLLERR = 0x8,
- EPOLLHUP = 0x10,
- EPOLLRDHUP = 0x2000,
- EPOLLET = -0x80000000,
- EPOLL_CLOEXEC = 0x80000,
- EPOLL_CTL_ADD = 0x1,
- EPOLL_CTL_DEL = 0x2,
- EPOLL_CTL_MOD = 0x3,
-};
-
-typedef struct Fpreg Fpreg;
-typedef struct Fpxreg Fpxreg;
-typedef struct Xmmreg Xmmreg;
-typedef struct Fpstate Fpstate;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct SigactionT SigactionT;
-typedef struct Siginfo Siginfo;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigcontext Sigcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Itimerval Itimerval;
-typedef struct EpollEvent EpollEvent;
-
-#pragma pack on
-
-struct Fpreg {
- uint16 significand[4];
- uint16 exponent;
-};
-struct Fpxreg {
- uint16 significand[4];
- uint16 exponent;
- uint16 padding[3];
-};
-struct Xmmreg {
- uint32 element[4];
-};
-struct Fpstate {
- uint32 cw;
- uint32 sw;
- uint32 tag;
- uint32 ipoff;
- uint32 cssel;
- uint32 dataoff;
- uint32 datasel;
- Fpreg _st[8];
- uint16 status;
- uint16 magic;
- uint32 _fxsr_env[6];
- uint32 mxcsr;
- uint32 reserved;
- Fpxreg _fxsr_st[8];
- Xmmreg _xmm[8];
- uint32 padding1[44];
- byte anon0[48];
-};
-struct Timespec {
- int32 tv_sec;
- int32 tv_nsec;
-};
-struct Timeval {
- int32 tv_sec;
- int32 tv_usec;
-};
-struct SigactionT {
- void *k_sa_handler;
- uint32 sa_flags;
- void *sa_restorer;
- uint64 sa_mask;
-};
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- byte _sifields[116];
-};
-struct SigaltstackT {
- byte *ss_sp;
- int32 ss_flags;
- uint32 ss_size;
-};
-struct Sigcontext {
- uint16 gs;
- uint16 __gsh;
- uint16 fs;
- uint16 __fsh;
- uint16 es;
- uint16 __esh;
- uint16 ds;
- uint16 __dsh;
- uint32 edi;
- uint32 esi;
- uint32 ebp;
- uint32 esp;
- uint32 ebx;
- uint32 edx;
- uint32 ecx;
- uint32 eax;
- uint32 trapno;
- uint32 err;
- uint32 eip;
- uint16 cs;
- uint16 __csh;
- uint32 eflags;
- uint32 esp_at_signal;
- uint16 ss;
- uint16 __ssh;
- Fpstate *fpstate;
- uint32 oldmask;
- uint32 cr2;
-};
-struct Ucontext {
- uint32 uc_flags;
- Ucontext *uc_link;
- SigaltstackT uc_stack;
- Sigcontext uc_mcontext;
- uint32 uc_sigmask;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-struct EpollEvent {
- uint32 events;
- byte data[8]; // to match amd64
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_linux_amd64.go b/src/runtime/defs_linux_amd64.go
new file mode 100644
index 000000000..7a1caea74
--- /dev/null
+++ b/src/runtime/defs_linux_amd64.go
@@ -0,0 +1,253 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_linux.go defs1_linux.go
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_RESTORER = 0x4000000
+ _SA_SIGINFO = 0x4
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = int64(x)
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint64
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ pad_cgo_0 [4]byte
+ _sifields [112]byte
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type epollevent struct {
+ events uint32
+ data [8]byte // unaligned uintptr
+}
+
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_linux.go defs1_linux.go
+
+const (
+ _O_RDONLY = 0x0
+ _O_CLOEXEC = 0x80000
+)
+
+type usigset struct {
+ __val [16]uint64
+}
+
+type fpxreg struct {
+ significand [4]uint16
+ exponent uint16
+ padding [3]uint16
+}
+
+type xmmreg struct {
+ element [4]uint32
+}
+
+type fpstate struct {
+ cwd uint16
+ swd uint16
+ ftw uint16
+ fop uint16
+ rip uint64
+ rdp uint64
+ mxcsr uint32
+ mxcr_mask uint32
+ _st [8]fpxreg
+ _xmm [16]xmmreg
+ padding [24]uint32
+}
+
+type fpxreg1 struct {
+ significand [4]uint16
+ exponent uint16
+ padding [3]uint16
+}
+
+type xmmreg1 struct {
+ element [4]uint32
+}
+
+type fpstate1 struct {
+ cwd uint16
+ swd uint16
+ ftw uint16
+ fop uint16
+ rip uint64
+ rdp uint64
+ mxcsr uint32
+ mxcr_mask uint32
+ _st [8]fpxreg1
+ _xmm [16]xmmreg1
+ padding [24]uint32
+}
+
+type fpreg1 struct {
+ significand [4]uint16
+ exponent uint16
+}
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_flags int32
+ pad_cgo_0 [4]byte
+ ss_size uintptr
+}
+
+type mcontext struct {
+ gregs [23]uint64
+ fpregs *fpstate
+ __reserved1 [8]uint64
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_stack sigaltstackt
+ uc_mcontext mcontext
+ uc_sigmask usigset
+ __fpregs_mem fpstate
+}
+
+type sigcontext struct {
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+ rdi uint64
+ rsi uint64
+ rbp uint64
+ rbx uint64
+ rdx uint64
+ rax uint64
+ rcx uint64
+ rsp uint64
+ rip uint64
+ eflags uint64
+ cs uint16
+ gs uint16
+ fs uint16
+ __pad0 uint16
+ err uint64
+ trapno uint64
+ oldmask uint64
+ cr2 uint64
+ fpstate *fpstate1
+ __reserved1 [8]uint64
+}
diff --git a/src/runtime/defs_linux_amd64.h b/src/runtime/defs_linux_amd64.h
deleted file mode 100644
index 14616dffe..000000000
--- a/src/runtime/defs_linux_amd64.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_linux.go defs1_linux.go
-
-
-enum {
- EINTR = 0x4,
- EAGAIN = 0xb,
- ENOMEM = 0xc,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x20,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_DONTNEED = 0x4,
-
- SA_RESTART = 0x10000000,
- SA_ONSTACK = 0x8000000,
- SA_RESTORER = 0x4000000,
- SA_SIGINFO = 0x4,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGBUS = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGUSR1 = 0xa,
- SIGSEGV = 0xb,
- SIGUSR2 = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGSTKFLT = 0x10,
- SIGCHLD = 0x11,
- SIGCONT = 0x12,
- SIGSTOP = 0x13,
- SIGTSTP = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGURG = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGIO = 0x1d,
- SIGPWR = 0x1e,
- SIGSYS = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EPOLLIN = 0x1,
- EPOLLOUT = 0x4,
- EPOLLERR = 0x8,
- EPOLLHUP = 0x10,
- EPOLLRDHUP = 0x2000,
- EPOLLET = -0x80000000,
- EPOLL_CLOEXEC = 0x80000,
- EPOLL_CTL_ADD = 0x1,
- EPOLL_CTL_DEL = 0x2,
- EPOLL_CTL_MOD = 0x3,
-};
-
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct SigactionT SigactionT;
-typedef struct Siginfo Siginfo;
-typedef struct Itimerval Itimerval;
-typedef struct EpollEvent EpollEvent;
-
-#pragma pack on
-
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int64 tv_usec;
-};
-struct SigactionT {
- void *sa_handler;
- uint64 sa_flags;
- void *sa_restorer;
- uint64 sa_mask;
-};
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- byte Pad_cgo_0[4];
- byte _sifields[112];
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-struct EpollEvent {
- uint32 events;
- byte data[8]; // unaligned uintptr
-};
-
-
-#pragma pack off
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_linux.go defs1_linux.go
-
-
-enum {
- O_RDONLY = 0x0,
- O_CLOEXEC = 0x80000,
-};
-
-typedef struct Usigset Usigset;
-typedef struct Fpxreg Fpxreg;
-typedef struct Xmmreg Xmmreg;
-typedef struct Fpstate Fpstate;
-typedef struct Fpxreg1 Fpxreg1;
-typedef struct Xmmreg1 Xmmreg1;
-typedef struct Fpstate1 Fpstate1;
-typedef struct Fpreg1 Fpreg1;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Sigcontext Sigcontext;
-
-#pragma pack on
-
-struct Usigset {
- uint64 __val[16];
-};
-struct Fpxreg {
- uint16 significand[4];
- uint16 exponent;
- uint16 padding[3];
-};
-struct Xmmreg {
- uint32 element[4];
-};
-struct Fpstate {
- uint16 cwd;
- uint16 swd;
- uint16 ftw;
- uint16 fop;
- uint64 rip;
- uint64 rdp;
- uint32 mxcsr;
- uint32 mxcr_mask;
- Fpxreg _st[8];
- Xmmreg _xmm[16];
- uint32 padding[24];
-};
-struct Fpxreg1 {
- uint16 significand[4];
- uint16 exponent;
- uint16 padding[3];
-};
-struct Xmmreg1 {
- uint32 element[4];
-};
-struct Fpstate1 {
- uint16 cwd;
- uint16 swd;
- uint16 ftw;
- uint16 fop;
- uint64 rip;
- uint64 rdp;
- uint32 mxcsr;
- uint32 mxcr_mask;
- Fpxreg1 _st[8];
- Xmmreg1 _xmm[16];
- uint32 padding[24];
-};
-struct Fpreg1 {
- uint16 significand[4];
- uint16 exponent;
-};
-struct SigaltstackT {
- byte *ss_sp;
- int32 ss_flags;
- byte Pad_cgo_0[4];
- uint64 ss_size;
-};
-struct Mcontext {
- int64 gregs[23];
- Fpstate *fpregs;
- uint64 __reserved1[8];
-};
-struct Ucontext {
- uint64 uc_flags;
- Ucontext *uc_link;
- SigaltstackT uc_stack;
- Mcontext uc_mcontext;
- Usigset uc_sigmask;
- Fpstate __fpregs_mem;
-};
-struct Sigcontext {
- uint64 r8;
- uint64 r9;
- uint64 r10;
- uint64 r11;
- uint64 r12;
- uint64 r13;
- uint64 r14;
- uint64 r15;
- uint64 rdi;
- uint64 rsi;
- uint64 rbp;
- uint64 rbx;
- uint64 rdx;
- uint64 rax;
- uint64 rcx;
- uint64 rsp;
- uint64 rip;
- uint64 eflags;
- uint16 cs;
- uint16 gs;
- uint16 fs;
- uint16 __pad0;
- uint64 err;
- uint64 trapno;
- uint64 oldmask;
- uint64 cr2;
- Fpstate1 *fpstate;
- uint64 __reserved1[8];
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go
new file mode 100644
index 000000000..7f8300293
--- /dev/null
+++ b/src/runtime/defs_linux_arm.go
@@ -0,0 +1,167 @@
+package runtime
+
+// Constants
+const (
+ _EINTR = 0x4
+ _ENOMEM = 0xc
+ _EAGAIN = 0xb
+
+ _PROT_NONE = 0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+ _MADV_DONTNEED = 0x4
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_RESTORER = 0 // unused on ARM
+ _SA_SIGINFO = 0x4
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+ _ITIMER_REAL = 0
+ _ITIMER_PROF = 0x2
+ _ITIMER_VIRTUAL = 0x1
+ _O_RDONLY = 0
+ _O_CLOEXEC = 02000000
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+type timespec struct {
+ tv_sec int32
+ tv_nsec int32
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = x
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = x
+}
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_flags int32
+ ss_size uintptr
+}
+
+type sigcontext struct {
+ trap_no uint32
+ error_code uint32
+ oldmask uint32
+ r0 uint32
+ r1 uint32
+ r2 uint32
+ r3 uint32
+ r4 uint32
+ r5 uint32
+ r6 uint32
+ r7 uint32
+ r8 uint32
+ r9 uint32
+ r10 uint32
+ fp uint32
+ ip uint32
+ sp uint32
+ lr uint32
+ pc uint32
+ cpsr uint32
+ fault_address uint32
+}
+
+type ucontext struct {
+ uc_flags uint32
+ uc_link *ucontext
+ uc_stack sigaltstackt
+ uc_mcontext sigcontext
+ uc_sigmask uint32
+ __unused [31]int32
+ uc_regspace [128]uint32
+}
+
+type timeval struct {
+ tv_sec int32
+ tv_usec int32
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ _sifields [4]uint8
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint32
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type epollevent struct {
+ events uint32
+ _pad uint32
+ data [8]byte // to match amd64
+}
diff --git a/src/runtime/defs_linux_arm.h b/src/runtime/defs_linux_arm.h
deleted file mode 100644
index 50b3c919e..000000000
--- a/src/runtime/defs_linux_arm.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// TODO: Generate using cgo like defs_linux_{386,amd64}.h
-
-// Constants
-enum {
- EINTR = 0x4,
- ENOMEM = 0xc,
- EAGAIN = 0xb,
-
- PROT_NONE = 0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
- MAP_ANON = 0x20,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
- MADV_DONTNEED = 0x4,
- SA_RESTART = 0x10000000,
- SA_ONSTACK = 0x8000000,
- SA_RESTORER = 0, // unused on ARM
- SA_SIGINFO = 0x4,
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGBUS = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGUSR1 = 0xa,
- SIGSEGV = 0xb,
- SIGUSR2 = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGSTKFLT = 0x10,
- SIGCHLD = 0x11,
- SIGCONT = 0x12,
- SIGSTOP = 0x13,
- SIGTSTP = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGURG = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGIO = 0x1d,
- SIGPWR = 0x1e,
- SIGSYS = 0x1f,
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
- ITIMER_REAL = 0,
- ITIMER_PROF = 0x2,
- ITIMER_VIRTUAL = 0x1,
- O_RDONLY = 0,
- O_CLOEXEC = 02000000,
-
- EPOLLIN = 0x1,
- EPOLLOUT = 0x4,
- EPOLLERR = 0x8,
- EPOLLHUP = 0x10,
- EPOLLRDHUP = 0x2000,
- EPOLLET = -0x80000000,
- EPOLL_CLOEXEC = 0x80000,
- EPOLL_CTL_ADD = 0x1,
- EPOLL_CTL_DEL = 0x2,
- EPOLL_CTL_MOD = 0x3,
-};
-
-// Types
-#pragma pack on
-
-typedef struct Timespec Timespec;
-struct Timespec {
- int32 tv_sec;
- int32 tv_nsec;
-};
-
-typedef struct SigaltstackT SigaltstackT;
-struct SigaltstackT {
- void *ss_sp;
- int32 ss_flags;
- uint32 ss_size;
-};
-
-typedef struct Sigcontext Sigcontext;
-struct Sigcontext {
- uint32 trap_no;
- uint32 error_code;
- uint32 oldmask;
- uint32 arm_r0;
- uint32 arm_r1;
- uint32 arm_r2;
- uint32 arm_r3;
- uint32 arm_r4;
- uint32 arm_r5;
- uint32 arm_r6;
- uint32 arm_r7;
- uint32 arm_r8;
- uint32 arm_r9;
- uint32 arm_r10;
- uint32 arm_fp;
- uint32 arm_ip;
- uint32 arm_sp;
- uint32 arm_lr;
- uint32 arm_pc;
- uint32 arm_cpsr;
- uint32 fault_address;
-};
-
-typedef struct Ucontext Ucontext;
-struct Ucontext {
- uint32 uc_flags;
- Ucontext *uc_link;
- SigaltstackT uc_stack;
- Sigcontext uc_mcontext;
- uint32 uc_sigmask;
- int32 __unused[31];
- uint32 uc_regspace[128];
-};
-
-typedef struct Timeval Timeval;
-struct Timeval {
- int32 tv_sec;
- int32 tv_usec;
-};
-
-typedef struct Itimerval Itimerval;
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-typedef struct Siginfo Siginfo;
-struct Siginfo {
- int32 si_signo;
- int32 si_errno;
- int32 si_code;
- uint8 _sifields[4];
-};
-
-typedef struct SigactionT SigactionT;
-struct SigactionT {
- void *sa_handler;
- uint32 sa_flags;
- void *sa_restorer;
- uint64 sa_mask;
-};
-
-typedef struct EpollEvent EpollEvent;
-struct EpollEvent {
- uint32 events;
- uint32 _pad;
- byte data[8]; // to match amd64
-};
-#pragma pack off
diff --git a/src/runtime/defs_nacl_386.go b/src/runtime/defs_nacl_386.go
new file mode 100644
index 000000000..498882904
--- /dev/null
+++ b/src/runtime/defs_nacl_386.go
@@ -0,0 +1,42 @@
+package runtime
+
+const (
+ // These values are referred to in the source code
+ // but really don't matter. Even so, use the standard numbers.
+ _SIGSEGV = 11
+ _SIGPROF = 27
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+type excregs386 struct {
+ eax uint32
+ ecx uint32
+ edx uint32
+ ebx uint32
+ esp uint32
+ ebp uint32
+ esi uint32
+ edi uint32
+ eip uint32
+ eflags uint32
+}
+
+type exccontext struct {
+ size uint32
+ portable_context_offset uint32
+ portable_context_size uint32
+ arch uint32
+ regs_size uint32
+ reserved [11]uint32
+ regs excregs386
+}
+
+type excportablecontext struct {
+ pc uint32
+ sp uint32
+ fp uint32
+}
diff --git a/src/runtime/defs_nacl_386.h b/src/runtime/defs_nacl_386.h
deleted file mode 100644
index e8fbb38e1..000000000
--- a/src/runtime/defs_nacl_386.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Created by hand, not machine generated.
-
-enum
-{
- // These values are referred to in the source code
- // but really don't matter. Even so, use the standard numbers.
- SIGSEGV = 11,
- SIGPROF = 27,
-};
-
-typedef struct Siginfo Siginfo;
-
-// native_client/src/trusted/service_runtime/include/machine/_types.h
-typedef struct Timespec Timespec;
-
-struct Timespec
-{
- int64 tv_sec;
- int32 tv_nsec;
-};
-
-// native_client/src/trusted/service_runtime/nacl_exception.h
-// native_client/src/include/nacl/nacl_exception.h
-
-typedef struct ExcContext ExcContext;
-typedef struct ExcPortable ExcPortable;
-typedef struct ExcRegs386 ExcRegs386;
-
-struct ExcRegs386
-{
- uint32 eax;
- uint32 ecx;
- uint32 edx;
- uint32 ebx;
- uint32 esp;
- uint32 ebp;
- uint32 esi;
- uint32 edi;
- uint32 eip;
- uint32 eflags;
-};
-
-struct ExcContext
-{
- uint32 size;
- uint32 portable_context_offset;
- uint32 portable_context_size;
- uint32 arch;
- uint32 regs_size;
- uint32 reserved[11];
- ExcRegs386 regs;
-};
-
-struct ExcPortableContext
-{
- uint32 pc;
- uint32 sp;
- uint32 fp;
-};
diff --git a/src/runtime/defs_nacl_amd64p32.go b/src/runtime/defs_nacl_amd64p32.go
new file mode 100644
index 000000000..add11fe06
--- /dev/null
+++ b/src/runtime/defs_nacl_amd64p32.go
@@ -0,0 +1,63 @@
+package runtime
+
+const (
+ // These values are referred to in the source code
+ // but really don't matter. Even so, use the standard numbers.
+ _SIGSEGV = 11
+ _SIGPROF = 27
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+type excregs386 struct {
+ eax uint32
+ ecx uint32
+ edx uint32
+ ebx uint32
+ esp uint32
+ ebp uint32
+ esi uint32
+ edi uint32
+ eip uint32
+ eflags uint32
+}
+
+type excregsamd64 struct {
+ rax uint64
+ rcx uint64
+ rdx uint64
+ rbx uint64
+ rsp uint64
+ rbp uint64
+ rsi uint64
+ rdi uint64
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+ rip uint64
+ rflags uint32
+}
+
+type exccontext struct {
+ size uint32
+ portable_context_offset uint32
+ portable_context_size uint32
+ arch uint32
+ regs_size uint32
+ reserved [11]uint32
+ regs excregsamd64
+}
+
+type excportablecontext struct {
+ pc uint32
+ sp uint32
+ fp uint32
+}
diff --git a/src/runtime/defs_nacl_amd64p32.h b/src/runtime/defs_nacl_amd64p32.h
deleted file mode 100644
index 45663d40a..000000000
--- a/src/runtime/defs_nacl_amd64p32.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Created by hand, not machine generated.
-
-enum
-{
- // These values are referred to in the source code
- // but really don't matter. Even so, use the standard numbers.
- SIGSEGV = 11,
- SIGPROF = 27,
-};
-
-typedef struct Siginfo Siginfo;
-
-
-// native_client/src/trusted/service_runtime/include/machine/_types.h
-typedef struct Timespec Timespec;
-
-struct Timespec
-{
- int64 tv_sec;
- int32 tv_nsec;
-};
-
-// native_client/src/trusted/service_runtime/nacl_exception.h
-// native_client/src/include/nacl/nacl_exception.h
-
-typedef struct ExcContext ExcContext;
-typedef struct ExcPortable ExcPortable;
-typedef struct ExcRegs386 ExcRegs386;
-typedef struct ExcRegsAmd64 ExcRegsAmd64;
-
-struct ExcRegs386
-{
- uint32 eax;
- uint32 ecx;
- uint32 edx;
- uint32 ebx;
- uint32 esp;
- uint32 ebp;
- uint32 esi;
- uint32 edi;
- uint32 eip;
- uint32 eflags;
-};
-
-struct ExcRegsAmd64
-{
- uint64 rax;
- uint64 rcx;
- uint64 rdx;
- uint64 rbx;
- uint64 rsp;
- uint64 rbp;
- uint64 rsi;
- uint64 rdi;
- uint64 r8;
- uint64 r9;
- uint64 r10;
- uint64 r11;
- uint64 r12;
- uint64 r13;
- uint64 r14;
- uint64 r15;
- uint64 rip;
- uint32 rflags;
-};
-
-struct ExcContext
-{
- uint32 size;
- uint32 portable_context_offset;
- uint32 portable_context_size;
- uint32 arch;
- uint32 regs_size;
- uint32 reserved[11];
- union {
- ExcRegs386 regs;
- ExcRegsAmd64 regs64;
- } regs;
-};
-
-struct ExcPortableContext
-{
- uint32 pc;
- uint32 sp;
- uint32 fp;
-};
diff --git a/src/runtime/defs_nacl_arm.go b/src/runtime/defs_nacl_arm.go
new file mode 100644
index 000000000..c983cffb9
--- /dev/null
+++ b/src/runtime/defs_nacl_arm.go
@@ -0,0 +1,49 @@
+package runtime
+
+const (
+ // These values are referred to in the source code
+ // but really don't matter. Even so, use the standard numbers.
+ _SIGSEGV = 11
+ _SIGPROF = 27
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+type excregsarm struct {
+ r0 uint32
+ r1 uint32
+ r2 uint32
+ r3 uint32
+ r4 uint32
+ r5 uint32
+ r6 uint32
+ r7 uint32
+ r8 uint32
+ r9 uint32 // the value reported here is undefined.
+ r10 uint32
+ r11 uint32
+ r12 uint32
+ sp uint32 /* r13 */
+ lr uint32 /* r14 */
+ pc uint32 /* r15 */
+ cpsr uint32
+}
+
+type exccontext struct {
+ size uint32
+ portable_context_offset uint32
+ portable_context_size uint32
+ arch uint32
+ regs_size uint32
+ reserved [11]uint32
+ regs excregsarm
+}
+
+type excportablecontext struct {
+ pc uint32
+ sp uint32
+ fp uint32
+}
diff --git a/src/runtime/defs_nacl_arm.h b/src/runtime/defs_nacl_arm.h
deleted file mode 100644
index 9ce07ccb2..000000000
--- a/src/runtime/defs_nacl_arm.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Created by hand, not machine generated.
-
-enum
-{
- // These values are referred to in the source code
- // but really don't matter. Even so, use the standard numbers.
- SIGSEGV = 11,
- SIGPROF = 27,
-};
-
-typedef struct Siginfo Siginfo;
-
-// native_client/src/trusted/service_runtime/include/machine/_types.h
-typedef struct Timespec Timespec;
-
-struct Timespec
-{
- int64 tv_sec;
- int32 tv_nsec;
-};
-
-// native_client/src/trusted/service_runtime/nacl_exception.h
-// native_client/src/include/nacl/nacl_exception.h
-
-typedef struct ExcContext ExcContext;
-typedef struct ExcPortable ExcPortable;
-typedef struct ExcRegsARM ExcRegsARM;
-
-struct ExcRegsARM
-{
- uint32 r0;
- uint32 r1;
- uint32 r2;
- uint32 r3;
- uint32 r4;
- uint32 r5;
- uint32 r6;
- uint32 r7;
- uint32 r8;
- uint32 r9; // the value reported here is undefined.
- uint32 r10;
- uint32 r11;
- uint32 r12;
- uint32 sp; /* r13 */
- uint32 lr; /* r14 */
- uint32 pc; /* r15 */
- uint32 cpsr;
-};
-
-struct ExcContext
-{
- uint32 size;
- uint32 portable_context_offset;
- uint32 portable_context_size;
- uint32 arch;
- uint32 regs_size;
- uint32 reserved[11];
- ExcRegsARM regs;
-};
-
-struct ExcPortableContext
-{
- uint32 pc;
- uint32 sp;
- uint32 fp;
-};
diff --git a/src/runtime/defs_netbsd_386.h b/src/runtime/defs_netbsd_386.h
deleted file mode 100644
index fd87804f9..000000000
--- a/src/runtime/defs_netbsd_386.h
+++ /dev/null
@@ -1,182 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_netbsd.go defs_netbsd_386.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x6,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0,
- EV_ERROR = 0x4000,
- EVFILT_READ = 0x0,
- EVFILT_WRITE = 0x1,
-};
-
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct Siginfo Siginfo;
-typedef struct StackT StackT;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct McontextT McontextT;
-typedef struct UcontextT UcontextT;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct SigaltstackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct Siginfo {
- int32 _signo;
- int32 _code;
- int32 _errno;
- byte _reason[20];
-};
-
-struct StackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-
-struct Timespec {
- int64 tv_sec;
- int32 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int32 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct McontextT {
- int32 __gregs[19];
- byte __fpregs[644];
- int32 _mc_tlsbase;
-};
-struct UcontextT {
- uint32 uc_flags;
- UcontextT *uc_link;
- Sigset uc_sigmask;
- StackT uc_stack;
- McontextT uc_mcontext;
- int32 __uc_pad[4];
-};
-
-struct KeventT {
- uint32 ident;
- uint32 filter;
- uint32 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_netbsd.go defs_netbsd_386.go
-
-
-enum {
- REG_GS = 0x0,
- REG_FS = 0x1,
- REG_ES = 0x2,
- REG_DS = 0x3,
- REG_EDI = 0x4,
- REG_ESI = 0x5,
- REG_EBP = 0x6,
- REG_ESP = 0x7,
- REG_EBX = 0x8,
- REG_EDX = 0x9,
- REG_ECX = 0xa,
- REG_EAX = 0xb,
- REG_TRAPNO = 0xc,
- REG_ERR = 0xd,
- REG_EIP = 0xe,
- REG_CS = 0xf,
- REG_EFL = 0x10,
- REG_UESP = 0x11,
- REG_SS = 0x12,
-};
-
diff --git a/src/runtime/defs_netbsd_amd64.h b/src/runtime/defs_netbsd_amd64.h
deleted file mode 100644
index dac94b113..000000000
--- a/src/runtime/defs_netbsd_amd64.h
+++ /dev/null
@@ -1,194 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x6,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0,
- EV_ERROR = 0x4000,
- EVFILT_READ = 0x0,
- EVFILT_WRITE = 0x1,
-};
-
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct Siginfo Siginfo;
-typedef struct StackT StackT;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct McontextT McontextT;
-typedef struct UcontextT UcontextT;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct SigaltstackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct Siginfo {
- int32 _signo;
- int32 _code;
- int32 _errno;
- int32 _pad;
- byte _reason[24];
-};
-
-struct StackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int32 tv_usec;
- byte Pad_cgo_0[4];
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct McontextT {
- uint64 __gregs[26];
- uint64 _mc_tlsbase;
- int8 __fpregs[512];
-};
-struct UcontextT {
- uint32 uc_flags;
- byte Pad_cgo_0[4];
- UcontextT *uc_link;
- Sigset uc_sigmask;
- StackT uc_stack;
- McontextT uc_mcontext;
-};
-
-struct KeventT {
- uint64 ident;
- uint32 filter;
- uint32 flags;
- uint32 fflags;
- byte Pad_cgo_0[4];
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_netbsd.go defs_netbsd_amd64.go
-
-
-enum {
- REG_RDI = 0x0,
- REG_RSI = 0x1,
- REG_RDX = 0x2,
- REG_RCX = 0x3,
- REG_R8 = 0x4,
- REG_R9 = 0x5,
- REG_R10 = 0x6,
- REG_R11 = 0x7,
- REG_R12 = 0x8,
- REG_R13 = 0x9,
- REG_R14 = 0xa,
- REG_R15 = 0xb,
- REG_RBP = 0xc,
- REG_RBX = 0xd,
- REG_RAX = 0xe,
- REG_GS = 0xf,
- REG_FS = 0x10,
- REG_ES = 0x11,
- REG_DS = 0x12,
- REG_TRAPNO = 0x13,
- REG_ERR = 0x14,
- REG_RIP = 0x15,
- REG_CS = 0x16,
- REG_RFLAGS = 0x17,
- REG_RSP = 0x18,
- REG_SS = 0x19,
-};
-
diff --git a/src/runtime/defs_netbsd_arm.h b/src/runtime/defs_netbsd_arm.h
deleted file mode 100644
index 70f34af47..000000000
--- a/src/runtime/defs_netbsd_arm.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x6,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_RECEIPT = 0,
- EV_ERROR = 0x4000,
- EVFILT_READ = 0x0,
- EVFILT_WRITE = 0x1,
-};
-
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct Siginfo Siginfo;
-typedef struct StackT StackT;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct McontextT McontextT;
-typedef struct UcontextT UcontextT;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct SigaltstackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-struct Sigset {
- uint32 __bits[4];
-};
-struct Siginfo {
- int32 _signo;
- int32 _code;
- int32 _errno;
- byte _reason[20];
-};
-
-struct StackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-
-struct Timespec {
- int64 tv_sec;
- int32 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int32 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct McontextT {
- uint32 __gregs[17];
-#ifdef __ARM_EABI__
- byte __fpu[4+8*32+4];
-#else
- byte __fpu[4+4*33+4];
-#endif
- uint32 _mc_tlsbase;
-};
-struct UcontextT {
- uint32 uc_flags;
- UcontextT *uc_link;
- Sigset uc_sigmask;
- StackT uc_stack;
- McontextT uc_mcontext;
- int32 __uc_pad[2];
-};
-
-struct KeventT {
- uint32 ident;
- uint32 filter;
- uint32 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_netbsd.go defs_netbsd_arm.go
-
-
-enum {
- REG_R0 = 0x0,
- REG_R1 = 0x1,
- REG_R2 = 0x2,
- REG_R3 = 0x3,
- REG_R4 = 0x4,
- REG_R5 = 0x5,
- REG_R6 = 0x6,
- REG_R7 = 0x7,
- REG_R8 = 0x8,
- REG_R9 = 0x9,
- REG_R10 = 0xa,
- REG_R11 = 0xb,
- REG_R12 = 0xc,
- REG_R13 = 0xd,
- REG_R14 = 0xe,
- REG_R15 = 0xf,
- REG_CPSR = 0x10,
-};
-
diff --git a/src/runtime/defs_openbsd_386.go b/src/runtime/defs_openbsd_386.go
new file mode 100644
index 000000000..d7cdbd227
--- /dev/null
+++ b/src/runtime/defs_openbsd_386.go
@@ -0,0 +1,170 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_openbsd.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x6
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type tforkt struct {
+ tf_tcb unsafe.Pointer
+ tf_tid *int32
+ tf_stack uintptr
+}
+
+type sigaltstackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+}
+
+type sigcontext struct {
+ sc_gs uint32
+ sc_fs uint32
+ sc_es uint32
+ sc_ds uint32
+ sc_edi uint32
+ sc_esi uint32
+ sc_ebp uint32
+ sc_ebx uint32
+ sc_edx uint32
+ sc_ecx uint32
+ sc_eax uint32
+ sc_eip uint32
+ sc_cs uint32
+ sc_eflags uint32
+ sc_esp uint32
+ sc_ss uint32
+ __sc_unused uint32
+ sc_mask uint32
+ sc_trapno uint32
+ sc_err uint32
+ sc_fpstate unsafe.Pointer
+}
+
+type siginfo struct {
+ si_signo int32
+ si_code int32
+ si_errno int32
+ _data [116]byte
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int32
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = int64(x)
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = x
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int32
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = x
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint32
+ filter int16
+ flags uint16
+ fflags uint32
+ data int64
+ udata *byte
+}
diff --git a/src/runtime/defs_openbsd_386.h b/src/runtime/defs_openbsd_386.h
deleted file mode 100644
index 6b77e0084..000000000
--- a/src/runtime/defs_openbsd_386.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_openbsd.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x6,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct TforkT TforkT;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigcontext Sigcontext;
-typedef struct Siginfo Siginfo;
-typedef struct StackT StackT;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct TforkT {
- byte *tf_tcb;
- int32 *tf_tid;
- byte *tf_stack;
-};
-
-struct SigaltstackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-struct Sigcontext {
- int32 sc_gs;
- int32 sc_fs;
- int32 sc_es;
- int32 sc_ds;
- int32 sc_edi;
- int32 sc_esi;
- int32 sc_ebp;
- int32 sc_ebx;
- int32 sc_edx;
- int32 sc_ecx;
- int32 sc_eax;
- int32 sc_eip;
- int32 sc_cs;
- int32 sc_eflags;
- int32 sc_esp;
- int32 sc_ss;
- int32 __sc_unused;
- int32 sc_mask;
- int32 sc_trapno;
- int32 sc_err;
- void *sc_fpstate;
-};
-struct Siginfo {
- int32 si_signo;
- int32 si_code;
- int32 si_errno;
- byte _data[116];
-};
-typedef uint32 Sigset;
-typedef byte Sigval[4];
-
-struct StackT {
- byte *ss_sp;
- uint32 ss_size;
- int32 ss_flags;
-};
-
-struct Timespec {
- int64 tv_sec;
- int32 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int32 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint32 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_openbsd_amd64.go b/src/runtime/defs_openbsd_amd64.go
new file mode 100644
index 000000000..122f46cf3
--- /dev/null
+++ b/src/runtime/defs_openbsd_amd64.go
@@ -0,0 +1,181 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_openbsd.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x6
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_ERROR = 0x4000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type tforkt struct {
+ tf_tcb unsafe.Pointer
+ tf_tid *int32
+ tf_stack uintptr
+}
+
+type sigaltstackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type sigcontext struct {
+ sc_rdi uint64
+ sc_rsi uint64
+ sc_rdx uint64
+ sc_rcx uint64
+ sc_r8 uint64
+ sc_r9 uint64
+ sc_r10 uint64
+ sc_r11 uint64
+ sc_r12 uint64
+ sc_r13 uint64
+ sc_r14 uint64
+ sc_r15 uint64
+ sc_rbp uint64
+ sc_rbx uint64
+ sc_rax uint64
+ sc_gs uint64
+ sc_fs uint64
+ sc_es uint64
+ sc_ds uint64
+ sc_trapno uint64
+ sc_err uint64
+ sc_rip uint64
+ sc_cs uint64
+ sc_rflags uint64
+ sc_rsp uint64
+ sc_ss uint64
+ sc_fpstate unsafe.Pointer
+ __sc_unused int32
+ sc_mask int32
+}
+
+type siginfo struct {
+ si_signo int32
+ si_code int32
+ si_errno int32
+ pad_cgo_0 [4]byte
+ _data [120]byte
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int32) {
+ ts.tv_sec = int64(x)
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type keventt struct {
+ ident uint64
+ filter int16
+ flags uint16
+ fflags uint32
+ data int64
+ udata *byte
+}
diff --git a/src/runtime/defs_openbsd_amd64.h b/src/runtime/defs_openbsd_amd64.h
deleted file mode 100644
index 761e8e47d..000000000
--- a/src/runtime/defs_openbsd_amd64.h
+++ /dev/null
@@ -1,179 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_openbsd.go
-
-
-enum {
- EINTR = 0x4,
- EFAULT = 0xe,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x1000,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x6,
-
- SA_SIGINFO = 0x40,
- SA_RESTART = 0x2,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x10,
- SIGSTOP = 0x11,
- SIGTSTP = 0x12,
- SIGCONT = 0x13,
- SIGCHLD = 0x14,
- SIGTTIN = 0x15,
- SIGTTOU = 0x16,
- SIGIO = 0x17,
- SIGXCPU = 0x18,
- SIGXFSZ = 0x19,
- SIGVTALRM = 0x1a,
- SIGPROF = 0x1b,
- SIGWINCH = 0x1c,
- SIGINFO = 0x1d,
- SIGUSR1 = 0x1e,
- SIGUSR2 = 0x1f,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- EV_ADD = 0x1,
- EV_DELETE = 0x2,
- EV_CLEAR = 0x20,
- EV_ERROR = 0x4000,
- EVFILT_READ = -0x1,
- EVFILT_WRITE = -0x2,
-};
-
-typedef struct TforkT TforkT;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigcontext Sigcontext;
-typedef struct Siginfo Siginfo;
-typedef struct StackT StackT;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct KeventT KeventT;
-
-#pragma pack on
-
-struct TforkT {
- byte *tf_tcb;
- int32 *tf_tid;
- byte *tf_stack;
-};
-
-struct SigaltstackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-struct Sigcontext {
- int64 sc_rdi;
- int64 sc_rsi;
- int64 sc_rdx;
- int64 sc_rcx;
- int64 sc_r8;
- int64 sc_r9;
- int64 sc_r10;
- int64 sc_r11;
- int64 sc_r12;
- int64 sc_r13;
- int64 sc_r14;
- int64 sc_r15;
- int64 sc_rbp;
- int64 sc_rbx;
- int64 sc_rax;
- int64 sc_gs;
- int64 sc_fs;
- int64 sc_es;
- int64 sc_ds;
- int64 sc_trapno;
- int64 sc_err;
- int64 sc_rip;
- int64 sc_cs;
- int64 sc_rflags;
- int64 sc_rsp;
- int64 sc_ss;
- void *sc_fpstate;
- int32 __sc_unused;
- int32 sc_mask;
-};
-struct Siginfo {
- int32 si_signo;
- int32 si_code;
- int32 si_errno;
- byte Pad_cgo_0[4];
- byte _data[120];
-};
-typedef uint32 Sigset;
-typedef byte Sigval[8];
-
-struct StackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int64 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct KeventT {
- uint64 ident;
- int16 filter;
- uint16 flags;
- uint32 fflags;
- int64 data;
- byte *udata;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_plan9_386.go b/src/runtime/defs_plan9_386.go
new file mode 100644
index 000000000..170506b23
--- /dev/null
+++ b/src/runtime/defs_plan9_386.go
@@ -0,0 +1,23 @@
+package runtime
+
+type ureg struct {
+ di uint32 /* general registers */
+ si uint32 /* ... */
+ bp uint32 /* ... */
+ nsp uint32
+ bx uint32 /* ... */
+ dx uint32 /* ... */
+ cx uint32 /* ... */
+ ax uint32 /* ... */
+ gs uint32 /* data segments */
+ fs uint32 /* ... */
+ es uint32 /* ... */
+ ds uint32 /* ... */
+ trap uint32 /* trap _type */
+ ecode uint32 /* error code (or zero) */
+ pc uint32 /* pc */
+ cs uint32 /* old context */
+ flags uint32 /* old flags */
+ sp uint32
+ ss uint32 /* old stack segment */
+}
diff --git a/src/runtime/defs_plan9_386.h b/src/runtime/defs_plan9_386.h
deleted file mode 100644
index a762b8589..000000000
--- a/src/runtime/defs_plan9_386.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#define PAGESIZE 0x1000
-
-typedef struct Ureg Ureg;
-
-struct Ureg
-{
- uint32 di; /* general registers */
- uint32 si; /* ... */
- uint32 bp; /* ... */
- uint32 nsp;
- uint32 bx; /* ... */
- uint32 dx; /* ... */
- uint32 cx; /* ... */
- uint32 ax; /* ... */
- uint32 gs; /* data segments */
- uint32 fs; /* ... */
- uint32 es; /* ... */
- uint32 ds; /* ... */
- uint32 trap; /* trap type */
- uint32 ecode; /* error code (or zero) */
- uint32 pc; /* pc */
- uint32 cs; /* old context */
- uint32 flags; /* old flags */
- uint32 sp;
- uint32 ss; /* old stack segment */
-};
diff --git a/src/runtime/defs_plan9_amd64.go b/src/runtime/defs_plan9_amd64.go
new file mode 100644
index 000000000..17becfb66
--- /dev/null
+++ b/src/runtime/defs_plan9_amd64.go
@@ -0,0 +1,32 @@
+package runtime
+
+type ureg struct {
+ ax uint64
+ bx uint64
+ cx uint64
+ dx uint64
+ si uint64
+ di uint64
+ bp uint64
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+
+ ds uint16
+ es uint16
+ fs uint16
+ gs uint16
+
+ _type uint64
+ error uint64 /* error code (or zero) */
+ ip uint64 /* pc */
+ cs uint64 /* old context */
+ flags uint64 /* old flags */
+ sp uint64 /* sp */
+ ss uint64 /* old stack segment */
+}
diff --git a/src/runtime/defs_plan9_amd64.h b/src/runtime/defs_plan9_amd64.h
deleted file mode 100644
index 20bca479c..000000000
--- a/src/runtime/defs_plan9_amd64.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#define PAGESIZE 0x1000
-
-typedef struct Ureg Ureg;
-
-struct Ureg {
- uint64 ax;
- uint64 bx;
- uint64 cx;
- uint64 dx;
- uint64 si;
- uint64 di;
- uint64 bp;
- uint64 r8;
- uint64 r9;
- uint64 r10;
- uint64 r11;
- uint64 r12;
- uint64 r13;
- uint64 r14;
- uint64 r15;
-
- uint16 ds;
- uint16 es;
- uint16 fs;
- uint16 gs;
-
- uint64 type;
- uint64 error; /* error code (or zero) */
- uint64 ip; /* pc */
- uint64 cs; /* old context */
- uint64 flags; /* old flags */
- uint64 sp; /* sp */
- uint64 ss; /* old stack segment */
-};
diff --git a/src/runtime/defs_solaris_amd64.h b/src/runtime/defs_solaris_amd64.h
deleted file mode 100644
index cb1cfeadc..000000000
--- a/src/runtime/defs_solaris_amd64.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_solaris.go defs_solaris_amd64.go
-
-
-enum {
- EINTR = 0x4,
- EBADF = 0x9,
- EFAULT = 0xe,
- EAGAIN = 0xb,
- ETIMEDOUT = 0x91,
- EWOULDBLOCK = 0xb,
- EINPROGRESS = 0x96,
-
- PROT_NONE = 0x0,
- PROT_READ = 0x1,
- PROT_WRITE = 0x2,
- PROT_EXEC = 0x4,
-
- MAP_ANON = 0x100,
- MAP_PRIVATE = 0x2,
- MAP_FIXED = 0x10,
-
- MADV_FREE = 0x5,
-
- SA_SIGINFO = 0x8,
- SA_RESTART = 0x4,
- SA_ONSTACK = 0x1,
-
- SIGHUP = 0x1,
- SIGINT = 0x2,
- SIGQUIT = 0x3,
- SIGILL = 0x4,
- SIGTRAP = 0x5,
- SIGABRT = 0x6,
- SIGEMT = 0x7,
- SIGFPE = 0x8,
- SIGKILL = 0x9,
- SIGBUS = 0xa,
- SIGSEGV = 0xb,
- SIGSYS = 0xc,
- SIGPIPE = 0xd,
- SIGALRM = 0xe,
- SIGTERM = 0xf,
- SIGURG = 0x15,
- SIGSTOP = 0x17,
- SIGTSTP = 0x18,
- SIGCONT = 0x19,
- SIGCHLD = 0x12,
- SIGTTIN = 0x1a,
- SIGTTOU = 0x1b,
- SIGIO = 0x16,
- SIGXCPU = 0x1e,
- SIGXFSZ = 0x1f,
- SIGVTALRM = 0x1c,
- SIGPROF = 0x1d,
- SIGWINCH = 0x14,
- SIGUSR1 = 0x10,
- SIGUSR2 = 0x11,
-
- FPE_INTDIV = 0x1,
- FPE_INTOVF = 0x2,
- FPE_FLTDIV = 0x3,
- FPE_FLTOVF = 0x4,
- FPE_FLTUND = 0x5,
- FPE_FLTRES = 0x6,
- FPE_FLTINV = 0x7,
- FPE_FLTSUB = 0x8,
-
- BUS_ADRALN = 0x1,
- BUS_ADRERR = 0x2,
- BUS_OBJERR = 0x3,
-
- SEGV_MAPERR = 0x1,
- SEGV_ACCERR = 0x2,
-
- ITIMER_REAL = 0x0,
- ITIMER_VIRTUAL = 0x1,
- ITIMER_PROF = 0x2,
-
- _SC_NPROCESSORS_ONLN = 0xf,
-
- PTHREAD_CREATE_DETACHED = 0x40,
-
- FORK_NOSIGCHLD = 0x1,
- FORK_WAITPID = 0x2,
-
- MAXHOSTNAMELEN = 0x100,
-
- O_NONBLOCK = 0x80,
- FD_CLOEXEC = 0x1,
- F_GETFL = 0x3,
- F_SETFL = 0x4,
- F_SETFD = 0x2,
-
- POLLIN = 0x1,
- POLLOUT = 0x4,
- POLLHUP = 0x10,
- POLLERR = 0x8,
-
- PORT_SOURCE_FD = 0x4,
-};
-
-typedef struct SemT SemT;
-typedef struct SigaltstackT SigaltstackT;
-typedef struct Sigset Sigset;
-typedef struct StackT StackT;
-typedef struct Siginfo Siginfo;
-typedef struct SigactionT SigactionT;
-typedef struct Fpregset Fpregset;
-typedef struct Mcontext Mcontext;
-typedef struct Ucontext Ucontext;
-typedef struct Timespec Timespec;
-typedef struct Timeval Timeval;
-typedef struct Itimerval Itimerval;
-typedef struct PortEvent PortEvent;
-typedef struct PthreadAttr PthreadAttr;
-typedef struct Stat Stat;
-
-#pragma pack on
-
-struct SemT {
- uint32 sem_count;
- uint16 sem_type;
- uint16 sem_magic;
- uint64 sem_pad1[3];
- uint64 sem_pad2[2];
-};
-
-struct SigaltstackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-struct Sigset {
- uint32 __sigbits[4];
-};
-struct StackT {
- byte *ss_sp;
- uint64 ss_size;
- int32 ss_flags;
- byte Pad_cgo_0[4];
-};
-
-struct Siginfo {
- int32 si_signo;
- int32 si_code;
- int32 si_errno;
- int32 si_pad;
- byte __data[240];
-};
-struct SigactionT {
- int32 sa_flags;
- byte Pad_cgo_0[4];
- byte _funcptr[8];
- Sigset sa_mask;
-};
-
-struct Fpregset {
- byte fp_reg_set[528];
-};
-struct Mcontext {
- int64 gregs[28];
- Fpregset fpregs;
-};
-struct Ucontext {
- uint64 uc_flags;
- Ucontext *uc_link;
- Sigset uc_sigmask;
- StackT uc_stack;
- byte Pad_cgo_0[8];
- Mcontext uc_mcontext;
- int64 uc_filler[5];
- byte Pad_cgo_1[8];
-};
-
-struct Timespec {
- int64 tv_sec;
- int64 tv_nsec;
-};
-struct Timeval {
- int64 tv_sec;
- int64 tv_usec;
-};
-struct Itimerval {
- Timeval it_interval;
- Timeval it_value;
-};
-
-struct PortEvent {
- int32 portev_events;
- uint16 portev_source;
- uint16 portev_pad;
- uint64 portev_object;
- byte *portev_user;
-};
-typedef uint32 Pthread;
-struct PthreadAttr {
- byte *__pthread_attrp;
-};
-
-struct Stat {
- uint64 st_dev;
- uint64 st_ino;
- uint32 st_mode;
- uint32 st_nlink;
- uint32 st_uid;
- uint32 st_gid;
- uint64 st_rdev;
- int64 st_size;
- Timespec st_atim;
- Timespec st_mtim;
- Timespec st_ctim;
- int32 st_blksize;
- byte Pad_cgo_0[4];
- int64 st_blocks;
- int8 st_fstype[16];
-};
-
-
-#pragma pack off
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_solaris.go defs_solaris_amd64.go
-
-
-enum {
- REG_RDI = 0x8,
- REG_RSI = 0x9,
- REG_RDX = 0xc,
- REG_RCX = 0xd,
- REG_R8 = 0x7,
- REG_R9 = 0x6,
- REG_R10 = 0x5,
- REG_R11 = 0x4,
- REG_R12 = 0x3,
- REG_R13 = 0x2,
- REG_R14 = 0x1,
- REG_R15 = 0x0,
- REG_RBP = 0xa,
- REG_RBX = 0xb,
- REG_RAX = 0xe,
- REG_GS = 0x17,
- REG_FS = 0x16,
- REG_ES = 0x18,
- REG_DS = 0x19,
- REG_TRAPNO = 0xf,
- REG_ERR = 0x10,
- REG_RIP = 0x11,
- REG_CS = 0x12,
- REG_RFLAGS = 0x13,
- REG_RSP = 0x14,
- REG_SS = 0x15,
-};
-
diff --git a/src/runtime/defs_windows_386.go b/src/runtime/defs_windows_386.go
new file mode 100644
index 000000000..abec2d839
--- /dev/null
+++ b/src/runtime/defs_windows_386.go
@@ -0,0 +1,109 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_windows.go
+
+package runtime
+
+const (
+ _PROT_NONE = 0
+ _PROT_READ = 1
+ _PROT_WRITE = 2
+ _PROT_EXEC = 4
+
+ _MAP_ANON = 1
+ _MAP_PRIVATE = 2
+
+ _DUPLICATE_SAME_ACCESS = 0x2
+ _THREAD_PRIORITY_HIGHEST = 0x2
+
+ _SIGINT = 0x2
+ _CTRL_C_EVENT = 0x0
+ _CTRL_BREAK_EVENT = 0x1
+
+ _CONTEXT_CONTROL = 0x10001
+ _CONTEXT_FULL = 0x10007
+
+ _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
+ _EXCEPTION_BREAKPOINT = 0x80000003
+ _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
+ _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
+ _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
+ _EXCEPTION_FLT_OVERFLOW = 0xc0000091
+ _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
+ _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
+ _EXCEPTION_INT_OVERFLOW = 0xc0000095
+
+ _INFINITE = 0xffffffff
+ _WAIT_TIMEOUT = 0x102
+
+ _EXCEPTION_CONTINUE_EXECUTION = -0x1
+ _EXCEPTION_CONTINUE_SEARCH = 0x0
+)
+
+type systeminfo struct {
+ anon0 [4]byte
+ dwpagesize uint32
+ lpminimumapplicationaddress *byte
+ lpmaximumapplicationaddress *byte
+ dwactiveprocessormask uint32
+ dwnumberofprocessors uint32
+ dwprocessortype uint32
+ dwallocationgranularity uint32
+ wprocessorlevel uint16
+ wprocessorrevision uint16
+}
+
+type exceptionrecord struct {
+ exceptioncode uint32
+ exceptionflags uint32
+ exceptionrecord *exceptionrecord
+ exceptionaddress *byte
+ numberparameters uint32
+ exceptioninformation [15]uint32
+}
+
+type floatingsavearea struct {
+ controlword uint32
+ statusword uint32
+ tagword uint32
+ erroroffset uint32
+ errorselector uint32
+ dataoffset uint32
+ dataselector uint32
+ registerarea [80]uint8
+ cr0npxstate uint32
+}
+
+type context struct {
+ contextflags uint32
+ dr0 uint32
+ dr1 uint32
+ dr2 uint32
+ dr3 uint32
+ dr6 uint32
+ dr7 uint32
+ floatsave floatingsavearea
+ seggs uint32
+ segfs uint32
+ seges uint32
+ segds uint32
+ edi uint32
+ esi uint32
+ ebx uint32
+ edx uint32
+ ecx uint32
+ eax uint32
+ ebp uint32
+ eip uint32
+ segcs uint32
+ eflags uint32
+ esp uint32
+ segss uint32
+ extendedregisters [512]uint8
+}
+
+type overlapped struct {
+ internal uint32
+ internalhigh uint32
+ anon0 [8]byte
+ hevent *byte
+}
diff --git a/src/runtime/defs_windows_386.h b/src/runtime/defs_windows_386.h
deleted file mode 100644
index 2317c04f6..000000000
--- a/src/runtime/defs_windows_386.h
+++ /dev/null
@@ -1,116 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_windows.go
-
-
-enum {
- PROT_NONE = 0,
- PROT_READ = 1,
- PROT_WRITE = 2,
- PROT_EXEC = 4,
-
- MAP_ANON = 1,
- MAP_PRIVATE = 2,
-
- DUPLICATE_SAME_ACCESS = 0x2,
- THREAD_PRIORITY_HIGHEST = 0x2,
-
- SIGINT = 0x2,
- CTRL_C_EVENT = 0x0,
- CTRL_BREAK_EVENT = 0x1,
-
- CONTEXT_CONTROL = 0x10001,
- CONTEXT_FULL = 0x10007,
-
- EXCEPTION_ACCESS_VIOLATION = 0xc0000005,
- EXCEPTION_BREAKPOINT = 0x80000003,
- EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d,
- EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e,
- EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f,
- EXCEPTION_FLT_OVERFLOW = 0xc0000091,
- EXCEPTION_FLT_UNDERFLOW = 0xc0000093,
- EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094,
- EXCEPTION_INT_OVERFLOW = 0xc0000095,
-
- INFINITE = 0xffffffff,
- WAIT_TIMEOUT = 0x102,
-
- EXCEPTION_CONTINUE_EXECUTION = -0x1,
- EXCEPTION_CONTINUE_SEARCH = 0x0,
-};
-
-typedef struct SystemInfo SystemInfo;
-typedef struct ExceptionRecord ExceptionRecord;
-typedef struct FloatingSaveArea FloatingSaveArea;
-typedef struct M128a M128a;
-typedef struct Context Context;
-typedef struct Overlapped Overlapped;
-
-#pragma pack on
-
-struct SystemInfo {
- byte anon0[4];
- uint32 dwPageSize;
- byte *lpMinimumApplicationAddress;
- byte *lpMaximumApplicationAddress;
- uint32 dwActiveProcessorMask;
- uint32 dwNumberOfProcessors;
- uint32 dwProcessorType;
- uint32 dwAllocationGranularity;
- uint16 wProcessorLevel;
- uint16 wProcessorRevision;
-};
-struct ExceptionRecord {
- uint32 ExceptionCode;
- uint32 ExceptionFlags;
- ExceptionRecord *ExceptionRecord;
- byte *ExceptionAddress;
- uint32 NumberParameters;
- uint32 ExceptionInformation[15];
-};
-struct FloatingSaveArea {
- uint32 ControlWord;
- uint32 StatusWord;
- uint32 TagWord;
- uint32 ErrorOffset;
- uint32 ErrorSelector;
- uint32 DataOffset;
- uint32 DataSelector;
- uint8 RegisterArea[80];
- uint32 Cr0NpxState;
-};
-struct Context {
- uint32 ContextFlags;
- uint32 Dr0;
- uint32 Dr1;
- uint32 Dr2;
- uint32 Dr3;
- uint32 Dr6;
- uint32 Dr7;
- FloatingSaveArea FloatSave;
- uint32 SegGs;
- uint32 SegFs;
- uint32 SegEs;
- uint32 SegDs;
- uint32 Edi;
- uint32 Esi;
- uint32 Ebx;
- uint32 Edx;
- uint32 Ecx;
- uint32 Eax;
- uint32 Ebp;
- uint32 Eip;
- uint32 SegCs;
- uint32 EFlags;
- uint32 Esp;
- uint32 SegSs;
- uint8 ExtendedRegisters[512];
-};
-struct Overlapped {
- uint32 Internal;
- uint32 InternalHigh;
- byte anon0[8];
- byte *hEvent;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/defs_windows_amd64.go b/src/runtime/defs_windows_amd64.go
new file mode 100644
index 000000000..81b13597b
--- /dev/null
+++ b/src/runtime/defs_windows_amd64.go
@@ -0,0 +1,124 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_windows.go
+
+package runtime
+
+const (
+ _PROT_NONE = 0
+ _PROT_READ = 1
+ _PROT_WRITE = 2
+ _PROT_EXEC = 4
+
+ _MAP_ANON = 1
+ _MAP_PRIVATE = 2
+
+ _DUPLICATE_SAME_ACCESS = 0x2
+ _THREAD_PRIORITY_HIGHEST = 0x2
+
+ _SIGINT = 0x2
+ _CTRL_C_EVENT = 0x0
+ _CTRL_BREAK_EVENT = 0x1
+
+ _CONTEXT_CONTROL = 0x100001
+ _CONTEXT_FULL = 0x10000b
+
+ _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
+ _EXCEPTION_BREAKPOINT = 0x80000003
+ _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
+ _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
+ _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
+ _EXCEPTION_FLT_OVERFLOW = 0xc0000091
+ _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
+ _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
+ _EXCEPTION_INT_OVERFLOW = 0xc0000095
+
+ _INFINITE = 0xffffffff
+ _WAIT_TIMEOUT = 0x102
+
+ _EXCEPTION_CONTINUE_EXECUTION = -0x1
+ _EXCEPTION_CONTINUE_SEARCH = 0x0
+)
+
+type systeminfo struct {
+ anon0 [4]byte
+ dwpagesize uint32
+ lpminimumapplicationaddress *byte
+ lpmaximumapplicationaddress *byte
+ dwactiveprocessormask uint64
+ dwnumberofprocessors uint32
+ dwprocessortype uint32
+ dwallocationgranularity uint32
+ wprocessorlevel uint16
+ wprocessorrevision uint16
+}
+
+type exceptionrecord struct {
+ exceptioncode uint32
+ exceptionflags uint32
+ exceptionrecord *exceptionrecord
+ exceptionaddress *byte
+ numberparameters uint32
+ pad_cgo_0 [4]byte
+ exceptioninformation [15]uint64
+}
+
+type m128a struct {
+ low uint64
+ high int64
+}
+
+type context struct {
+ p1home uint64
+ p2home uint64
+ p3home uint64
+ p4home uint64
+ p5home uint64
+ p6home uint64
+ contextflags uint32
+ mxcsr uint32
+ segcs uint16
+ segds uint16
+ seges uint16
+ segfs uint16
+ seggs uint16
+ segss uint16
+ eflags uint32
+ dr0 uint64
+ dr1 uint64
+ dr2 uint64
+ dr3 uint64
+ dr6 uint64
+ dr7 uint64
+ rax uint64
+ rcx uint64
+ rdx uint64
+ rbx uint64
+ rsp uint64
+ rbp uint64
+ rsi uint64
+ rdi uint64
+ r8 uint64
+ r9 uint64
+ r10 uint64
+ r11 uint64
+ r12 uint64
+ r13 uint64
+ r14 uint64
+ r15 uint64
+ rip uint64
+ anon0 [512]byte
+ vectorregister [26]m128a
+ vectorcontrol uint64
+ debugcontrol uint64
+ lastbranchtorip uint64
+ lastbranchfromrip uint64
+ lastexceptiontorip uint64
+ lastexceptionfromrip uint64
+}
+
+type overlapped struct {
+ internal uint64
+ internalhigh uint64
+ anon0 [8]byte
+ hevent *byte
+}
diff --git a/src/runtime/defs_windows_amd64.h b/src/runtime/defs_windows_amd64.h
deleted file mode 100644
index 7f37a7a8c..000000000
--- a/src/runtime/defs_windows_amd64.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Created by cgo -cdefs - DO NOT EDIT
-// cgo -cdefs defs_windows.go
-
-
-enum {
- PROT_NONE = 0,
- PROT_READ = 1,
- PROT_WRITE = 2,
- PROT_EXEC = 4,
-
- MAP_ANON = 1,
- MAP_PRIVATE = 2,
-
- DUPLICATE_SAME_ACCESS = 0x2,
- THREAD_PRIORITY_HIGHEST = 0x2,
-
- SIGINT = 0x2,
- CTRL_C_EVENT = 0x0,
- CTRL_BREAK_EVENT = 0x1,
-
- CONTEXT_CONTROL = 0x100001,
- CONTEXT_FULL = 0x10000b,
-
- EXCEPTION_ACCESS_VIOLATION = 0xc0000005,
- EXCEPTION_BREAKPOINT = 0x80000003,
- EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d,
- EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e,
- EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f,
- EXCEPTION_FLT_OVERFLOW = 0xc0000091,
- EXCEPTION_FLT_UNDERFLOW = 0xc0000093,
- EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094,
- EXCEPTION_INT_OVERFLOW = 0xc0000095,
-
- INFINITE = 0xffffffff,
- WAIT_TIMEOUT = 0x102,
-
- EXCEPTION_CONTINUE_EXECUTION = -0x1,
- EXCEPTION_CONTINUE_SEARCH = 0x0,
-};
-
-typedef struct SystemInfo SystemInfo;
-typedef struct ExceptionRecord ExceptionRecord;
-typedef struct FloatingSaveArea FloatingSaveArea;
-typedef struct M128a M128a;
-typedef struct Context Context;
-typedef struct Overlapped Overlapped;
-
-#pragma pack on
-
-struct SystemInfo {
- byte anon0[4];
- uint32 dwPageSize;
- byte *lpMinimumApplicationAddress;
- byte *lpMaximumApplicationAddress;
- uint64 dwActiveProcessorMask;
- uint32 dwNumberOfProcessors;
- uint32 dwProcessorType;
- uint32 dwAllocationGranularity;
- uint16 wProcessorLevel;
- uint16 wProcessorRevision;
-};
-struct ExceptionRecord {
- uint32 ExceptionCode;
- uint32 ExceptionFlags;
- ExceptionRecord *ExceptionRecord;
- byte *ExceptionAddress;
- uint32 NumberParameters;
- byte Pad_cgo_0[4];
- uint64 ExceptionInformation[15];
-};
-struct M128a {
- uint64 Low;
- int64 High;
-};
-struct Context {
- uint64 P1Home;
- uint64 P2Home;
- uint64 P3Home;
- uint64 P4Home;
- uint64 P5Home;
- uint64 P6Home;
- uint32 ContextFlags;
- uint32 MxCsr;
- uint16 SegCs;
- uint16 SegDs;
- uint16 SegEs;
- uint16 SegFs;
- uint16 SegGs;
- uint16 SegSs;
- uint32 EFlags;
- uint64 Dr0;
- uint64 Dr1;
- uint64 Dr2;
- uint64 Dr3;
- uint64 Dr6;
- uint64 Dr7;
- uint64 Rax;
- uint64 Rcx;
- uint64 Rdx;
- uint64 Rbx;
- uint64 Rsp;
- uint64 Rbp;
- uint64 Rsi;
- uint64 Rdi;
- uint64 R8;
- uint64 R9;
- uint64 R10;
- uint64 R11;
- uint64 R12;
- uint64 R13;
- uint64 R14;
- uint64 R15;
- uint64 Rip;
- byte anon0[512];
- M128a VectorRegister[26];
- uint64 VectorControl;
- uint64 DebugControl;
- uint64 LastBranchToRip;
- uint64 LastBranchFromRip;
- uint64 LastExceptionToRip;
- uint64 LastExceptionFromRip;
-};
-struct Overlapped {
- uint64 Internal;
- uint64 InternalHigh;
- byte anon0[8];
- byte *hEvent;
-};
-
-
-#pragma pack off
diff --git a/src/runtime/env_posix.go b/src/runtime/env_posix.go
index dd57872d7..03c7a5a4a 100644
--- a/src/runtime/env_posix.go
+++ b/src/runtime/env_posix.go
@@ -8,8 +8,6 @@ package runtime
import "unsafe"
-func environ() []string
-
func getenv(s *byte) *byte {
val := gogetenv(gostringnocopy(s))
if val == "" {
@@ -32,13 +30,13 @@ func gogetenv(key string) string {
return ""
}
-var _cgo_setenv uintptr // pointer to C function
-var _cgo_unsetenv uintptr // pointer to C function
+var _cgo_setenv unsafe.Pointer // pointer to C function
+var _cgo_unsetenv unsafe.Pointer // pointer to C function
// Update the C environment if cgo is loaded.
// Called from syscall.Setenv.
func syscall_setenv_c(k string, v string) {
- if _cgo_setenv == 0 {
+ if _cgo_setenv == nil {
return
}
arg := [2]unsafe.Pointer{cstring(k), cstring(v)}
@@ -48,7 +46,7 @@ func syscall_setenv_c(k string, v string) {
// Update the C environment if cgo is loaded.
// Called from syscall.unsetenv.
func syscall_unsetenv_c(k string) {
- if _cgo_unsetenv == 0 {
+ if _cgo_unsetenv == nil {
return
}
arg := [1]unsafe.Pointer{cstring(k)}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 65e918e84..5ed255026 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -34,21 +34,11 @@ func lfstackpush_m()
func lfstackpop_m()
func LFStackPush(head *uint64, node *LFNode) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(head)
- mp.ptrarg[1] = unsafe.Pointer(node)
- onM(lfstackpush_m)
- releasem(mp)
+ lfstackpush(head, (*lfnode)(unsafe.Pointer(node)))
}
func LFStackPop(head *uint64) *LFNode {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(head)
- onM(lfstackpop_m)
- node := (*LFNode)(unsafe.Pointer(mp.ptrarg[0]))
- mp.ptrarg[0] = nil
- releasem(mp)
- return node
+ return (*LFNode)(unsafe.Pointer(lfstackpop(head)))
}
type ParFor struct {
@@ -68,69 +58,49 @@ func parfordo_m()
func parforiters_m()
func NewParFor(nthrmax uint32) *ParFor {
- mp := acquirem()
- mp.scalararg[0] = uintptr(nthrmax)
- onM(newparfor_m)
- desc := (*ParFor)(mp.ptrarg[0])
- mp.ptrarg[0] = nil
- releasem(mp)
+ var desc *ParFor
+ systemstack(func() {
+ desc = (*ParFor)(unsafe.Pointer(parforalloc(nthrmax)))
+ })
return desc
}
func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32)) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(desc)
- mp.ptrarg[1] = unsafe.Pointer(ctx)
- mp.ptrarg[2] = unsafe.Pointer(funcPC(body)) // TODO(rsc): Should be a scalar.
- mp.scalararg[0] = uintptr(nthr)
- mp.scalararg[1] = uintptr(n)
- mp.scalararg[2] = 0
- if wait {
- mp.scalararg[2] = 1
- }
- onM(parforsetup_m)
- releasem(mp)
+ systemstack(func() {
+ parforsetup((*parfor)(unsafe.Pointer(desc)), nthr, n, unsafe.Pointer(ctx), wait,
+ *(*func(*parfor, uint32))(unsafe.Pointer(&body)))
+ })
}
func ParForDo(desc *ParFor) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(desc)
- onM(parfordo_m)
- releasem(mp)
+ systemstack(func() {
+ parfordo((*parfor)(unsafe.Pointer(desc)))
+ })
}
func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(desc)
- mp.scalararg[0] = uintptr(tid)
- onM(parforiters_m)
- begin := uint32(mp.scalararg[0])
- end := uint32(mp.scalararg[1])
- releasem(mp)
- return begin, end
+ desc1 := (*parfor)(unsafe.Pointer(desc))
+ pos := desc_thr_index(desc1, tid).pos
+ return uint32(pos), uint32(pos >> 32)
}
-// in mgc0.c
-//go:noescape
-func getgcmask(data unsafe.Pointer, typ *_type, array **byte, len *uint)
-
func GCMask(x interface{}) (ret []byte) {
e := (*eface)(unsafe.Pointer(&x))
s := (*slice)(unsafe.Pointer(&ret))
- onM(func() {
- getgcmask(e.data, e._type, &s.array, &s.len)
+ systemstack(func() {
+ var len uintptr
+ getgcmask(e.data, e._type, &s.array, &len)
+ s.len = uint(len)
s.cap = s.len
})
return
}
-func testSchedLocalQueue()
-func testSchedLocalQueueSteal()
func RunSchedLocalQueueTest() {
- onM(testSchedLocalQueue)
+ systemstack(testSchedLocalQueue)
}
func RunSchedLocalQueueStealTest() {
- onM(testSchedLocalQueueSteal)
+ systemstack(testSchedLocalQueueSteal)
}
var HaveGoodHash = haveGoodHash
@@ -149,13 +119,9 @@ func GogoBytes() int32 {
return _RuntimeGogoBytes
}
-// in string.c
-//go:noescape
-func gostringw(w *uint16) string
-
// entry point for testing
func GostringW(w []uint16) (s string) {
- onM(func() {
+ systemstack(func() {
s = gostringw(&w[0])
})
return
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index 6cc5df810..34fdeb2b4 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -112,7 +112,8 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
if xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {
xpc--
}
- line = int(funcline(f, xpc, &file))
+ file, line32 := funcline(f, xpc)
+ line = int(line32)
ok = true
return
}
diff --git a/src/runtime/float.c b/src/runtime/float.c
deleted file mode 100644
index 42082e434..000000000
--- a/src/runtime/float.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-
-// used as float64 via runtime· names
-uint64 ·nan = 0x7FF8000000000001ULL;
-uint64 ·posinf = 0x7FF0000000000000ULL;
-uint64 ·neginf = 0xFFF0000000000000ULL;
diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h
index d6c14fcb4..ce62dabe3 100644
--- a/src/runtime/funcdata.h
+++ b/src/runtime/funcdata.h
@@ -3,9 +3,10 @@
// license that can be found in the LICENSE file.
// This file defines the IDs for PCDATA and FUNCDATA instructions
-// in Go binaries. It is included by both C and assembly, so it must
-// be written using #defines. It is included by the runtime package
-// as well as the compilers.
+// in Go binaries. It is included by assembly sources, so it must
+// be written using #defines.
+//
+// The Go compiler also #includes this file, for now.
//
// symtab.go also contains a copy of these constants.
@@ -50,8 +51,7 @@
/*c2go
enum {
- PCDATA_ArgSize = 0,
- PCDATA_StackMapIndex = 1,
+ PCDATA_StackMapIndex = 0,
FUNCDATA_ArgsPointerMaps = 0,
FUNCDATA_LocalsPointerMaps = 1,
FUNCDATA_DeadValueMaps = 2,
diff --git a/src/runtime/futex_test.go b/src/runtime/futex_test.go
index f57fc52b8..b85249a54 100644
--- a/src/runtime/futex_test.go
+++ b/src/runtime/futex_test.go
@@ -44,9 +44,9 @@ func TestFutexsleep(t *testing.T) {
start := time.Now()
for _, tt := range futexsleepTests {
go func(tt futexsleepTest) {
- runtime.Entersyscall()
+ runtime.Entersyscall(0)
runtime.Futexsleep(&tt.mtx, tt.mtx, tt.ns)
- runtime.Exitsyscall()
+ runtime.Exitsyscall(0)
tt.ch <- tt
}(tt)
}
diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go
index 662b7546d..2b45c8184 100644
--- a/src/runtime/gcinfo_test.go
+++ b/src/runtime/gcinfo_test.go
@@ -62,12 +62,10 @@ func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) {
func nonStackInfo(mask []byte) []byte {
// BitsDead is replaced with BitsScalar everywhere except stacks.
mask1 := make([]byte, len(mask))
- mw := false
for i, v := range mask {
- if !mw && v == BitsDead {
+ if v == BitsDead {
v = BitsScalar
}
- mw = !mw && v == BitsMultiWord
mask1[i] = v
}
return mask1
@@ -84,7 +82,6 @@ const (
BitsDead = iota
BitsScalar
BitsPointer
- BitsMultiWord
)
const (
diff --git a/src/runtime/go_tls.h b/src/runtime/go_tls.h
new file mode 100644
index 000000000..6a707cf1e
--- /dev/null
+++ b/src/runtime/go_tls.h
@@ -0,0 +1,22 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#ifdef GOARCH_arm
+#define LR R14
+#endif
+
+#ifdef GOARCH_amd64
+#define get_tls(r) MOVQ TLS, r
+#define g(r) 0(r)(TLS*1)
+#endif
+
+#ifdef GOARCH_amd64p32
+#define get_tls(r) MOVL TLS, r
+#define g(r) 0(r)(TLS*1)
+#endif
+
+#ifdef GOARCH_386
+#define get_tls(r) MOVL TLS, r
+#define g(r) 0(r)(TLS*1)
+#endif
diff --git a/src/runtime/heapdump.c b/src/runtime/heapdump.c
deleted file mode 100644
index da14f2d24..000000000
--- a/src/runtime/heapdump.c
+++ /dev/null
@@ -1,851 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Implementation of runtime/debug.WriteHeapDump. Writes all
-// objects in the heap plus additional info (roots, threads,
-// finalizers, etc.) to a file.
-
-// The format of the dumped file is described at
-// http://code.google.com/p/go-wiki/wiki/heapdump14
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "mgc0.h"
-#include "type.h"
-#include "typekind.h"
-#include "funcdata.h"
-#include "zaexperiment.h"
-#include "textflag.h"
-
-extern byte runtime·data[];
-extern byte runtime·edata[];
-extern byte runtime·bss[];
-extern byte runtime·ebss[];
-
-enum {
- FieldKindEol = 0,
- FieldKindPtr = 1,
- FieldKindIface = 2,
- FieldKindEface = 3,
-
- TagEOF = 0,
- TagObject = 1,
- TagOtherRoot = 2,
- TagType = 3,
- TagGoRoutine = 4,
- TagStackFrame = 5,
- TagParams = 6,
- TagFinalizer = 7,
- TagItab = 8,
- TagOSThread = 9,
- TagMemStats = 10,
- TagQueuedFinalizer = 11,
- TagData = 12,
- TagBss = 13,
- TagDefer = 14,
- TagPanic = 15,
- TagMemProf = 16,
- TagAllocSample = 17,
-};
-
-static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
-static void dumpfields(BitVector bv);
-static void dumpbvtypes(BitVector *bv, byte *base);
-static BitVector makeheapobjbv(byte *p, uintptr size);
-
-// fd to write the dump to.
-static uintptr dumpfd;
-
-#pragma dataflag NOPTR /* tmpbuf not a heap pointer at least */
-static byte *tmpbuf;
-static uintptr tmpbufsize;
-
-// buffer of pending write data
-enum {
- BufSize = 4096,
-};
-#pragma dataflag NOPTR
-static byte buf[BufSize];
-static uintptr nbuf;
-
-static void
-write(byte *data, uintptr len)
-{
- if(len + nbuf <= BufSize) {
- runtime·memmove(buf + nbuf, data, len);
- nbuf += len;
- return;
- }
- runtime·write(dumpfd, buf, nbuf);
- if(len >= BufSize) {
- runtime·write(dumpfd, data, len);
- nbuf = 0;
- } else {
- runtime·memmove(buf, data, len);
- nbuf = len;
- }
-}
-
-static void
-flush(void)
-{
- runtime·write(dumpfd, buf, nbuf);
- nbuf = 0;
-}
-
-// Cache of types that have been serialized already.
-// We use a type's hash field to pick a bucket.
-// Inside a bucket, we keep a list of types that
-// have been serialized so far, most recently used first.
-// Note: when a bucket overflows we may end up
-// serializing a type more than once. That's ok.
-enum {
- TypeCacheBuckets = 256, // must be a power of 2
- TypeCacheAssoc = 4,
-};
-typedef struct TypeCacheBucket TypeCacheBucket;
-struct TypeCacheBucket {
- Type *t[TypeCacheAssoc];
-};
-#pragma dataflag NOPTR /* only initialized and used while world is stopped */
-static TypeCacheBucket typecache[TypeCacheBuckets];
-
-// dump a uint64 in a varint format parseable by encoding/binary
-static void
-dumpint(uint64 v)
-{
- byte buf[10];
- int32 n;
- n = 0;
- while(v >= 0x80) {
- buf[n++] = v | 0x80;
- v >>= 7;
- }
- buf[n++] = v;
- write(buf, n);
-}
-
-static void
-dumpbool(bool b)
-{
- dumpint(b ? 1 : 0);
-}
-
-// dump varint uint64 length followed by memory contents
-static void
-dumpmemrange(byte *data, uintptr len)
-{
- dumpint(len);
- write(data, len);
-}
-
-static void
-dumpstr(String s)
-{
- dumpmemrange(s.str, s.len);
-}
-
-static void
-dumpcstr(int8 *c)
-{
- dumpmemrange((byte*)c, runtime·findnull((byte*)c));
-}
-
-// dump information for a type
-static void
-dumptype(Type *t)
-{
- TypeCacheBucket *b;
- int32 i, j;
-
- if(t == nil) {
- return;
- }
-
- // If we've definitely serialized the type before,
- // no need to do it again.
- b = &typecache[t->hash & (TypeCacheBuckets-1)];
- if(t == b->t[0]) return;
- for(i = 1; i < TypeCacheAssoc; i++) {
- if(t == b->t[i]) {
- // Move-to-front
- for(j = i; j > 0; j--) {
- b->t[j] = b->t[j-1];
- }
- b->t[0] = t;
- return;
- }
- }
- // Might not have been dumped yet. Dump it and
- // remember we did so.
- for(j = TypeCacheAssoc-1; j > 0; j--) {
- b->t[j] = b->t[j-1];
- }
- b->t[0] = t;
-
- // dump the type
- dumpint(TagType);
- dumpint((uintptr)t);
- dumpint(t->size);
- if(t->x == nil || t->x->pkgPath == nil || t->x->name == nil) {
- dumpstr(*t->string);
- } else {
- dumpint(t->x->pkgPath->len + 1 + t->x->name->len);
- write(t->x->pkgPath->str, t->x->pkgPath->len);
- write((byte*)".", 1);
- write(t->x->name->str, t->x->name->len);
- }
- dumpbool((t->kind & KindDirectIface) == 0 || (t->kind & KindNoPointers) == 0);
-}
-
-// dump an object
-static void
-dumpobj(byte *obj, uintptr size, BitVector bv)
-{
- dumpbvtypes(&bv, obj);
- dumpint(TagObject);
- dumpint((uintptr)obj);
- dumpmemrange(obj, size);
- dumpfields(bv);
-}
-
-static void
-dumpotherroot(int8 *description, byte *to)
-{
- dumpint(TagOtherRoot);
- dumpcstr(description);
- dumpint((uintptr)to);
-}
-
-static void
-dumpfinalizer(byte *obj, FuncVal *fn, Type* fint, PtrType *ot)
-{
- dumpint(TagFinalizer);
- dumpint((uintptr)obj);
- dumpint((uintptr)fn);
- dumpint((uintptr)fn->fn);
- dumpint((uintptr)fint);
- dumpint((uintptr)ot);
-}
-
-typedef struct ChildInfo ChildInfo;
-struct ChildInfo {
- // Information passed up from the callee frame about
- // the layout of the outargs region.
- uintptr argoff; // where the arguments start in the frame
- uintptr arglen; // size of args region
- BitVector args; // if args.n >= 0, pointer map of args region
-
- byte *sp; // callee sp
- uintptr depth; // depth in call stack (0 == most recent)
-};
-
-// dump kinds & offsets of interesting fields in bv
-static void
-dumpbv(BitVector *bv, uintptr offset)
-{
- uintptr i;
-
- for(i = 0; i < bv->n; i += BitsPerPointer) {
- switch(bv->bytedata[i/8] >> i%8 & 3) {
- case BitsDead:
- // BitsDead has already been processed in makeheapobjbv.
- // We should only see it in stack maps, in which case we should continue processing.
- break;
- case BitsScalar:
- break;
- case BitsPointer:
- dumpint(FieldKindPtr);
- dumpint(offset + i / BitsPerPointer * PtrSize);
- break;
- case BitsMultiWord:
- runtime·throw("bumpbv unexpected garbage collection bits");
- }
- }
-}
-
-static bool
-dumpframe(Stkframe *s, void *arg)
-{
- Func *f;
- ChildInfo *child;
- uintptr pc, off, size;
- int32 pcdata;
- StackMap *stackmap;
- int8 *name;
- BitVector bv;
-
- child = (ChildInfo*)arg;
- f = s->fn;
-
- // Figure out what we can about our stack map
- pc = s->pc;
- if(pc != f->entry)
- pc--;
- pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, pc);
- if(pcdata == -1) {
- // We do not have a valid pcdata value but there might be a
- // stackmap for this function. It is likely that we are looking
- // at the function prologue, assume so and hope for the best.
- pcdata = 0;
- }
- stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
-
- // Dump any types we will need to resolve Efaces.
- if(child->args.n >= 0)
- dumpbvtypes(&child->args, (byte*)s->sp + child->argoff);
- if(stackmap != nil && stackmap->n > 0) {
- bv = runtime·stackmapdata(stackmap, pcdata);
- dumpbvtypes(&bv, (byte*)(s->varp - bv.n / BitsPerPointer * PtrSize));
- } else {
- bv.n = -1;
- }
-
- // Dump main body of stack frame.
- dumpint(TagStackFrame);
- dumpint(s->sp); // lowest address in frame
- dumpint(child->depth); // # of frames deep on the stack
- dumpint((uintptr)child->sp); // sp of child, or 0 if bottom of stack
- dumpmemrange((byte*)s->sp, s->fp - s->sp); // frame contents
- dumpint(f->entry);
- dumpint(s->pc);
- dumpint(s->continpc);
- name = runtime·funcname(f);
- if(name == nil)
- name = "unknown function";
- dumpcstr(name);
-
- // Dump fields in the outargs section
- if(child->args.n >= 0) {
- dumpbv(&child->args, child->argoff);
- } else {
- // conservative - everything might be a pointer
- for(off = child->argoff; off < child->argoff + child->arglen; off += PtrSize) {
- dumpint(FieldKindPtr);
- dumpint(off);
- }
- }
-
- // Dump fields in the local vars section
- if(stackmap == nil) {
- // No locals information, dump everything.
- for(off = child->arglen; off < s->varp - s->sp; off += PtrSize) {
- dumpint(FieldKindPtr);
- dumpint(off);
- }
- } else if(stackmap->n < 0) {
- // Locals size information, dump just the locals.
- size = -stackmap->n;
- for(off = s->varp - size - s->sp; off < s->varp - s->sp; off += PtrSize) {
- dumpint(FieldKindPtr);
- dumpint(off);
- }
- } else if(stackmap->n > 0) {
- // Locals bitmap information, scan just the pointers in
- // locals.
- dumpbv(&bv, s->varp - bv.n / BitsPerPointer * PtrSize - s->sp);
- }
- dumpint(FieldKindEol);
-
- // Record arg info for parent.
- child->argoff = s->argp - s->fp;
- child->arglen = s->arglen;
- child->sp = (byte*)s->sp;
- child->depth++;
- stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
- if(stackmap != nil)
- child->args = runtime·stackmapdata(stackmap, pcdata);
- else
- child->args.n = -1;
- return true;
-}
-
-static void
-dumpgoroutine(G *gp)
-{
- uintptr sp, pc, lr;
- ChildInfo child;
- Defer *d;
- Panic *p;
- bool (*fn)(Stkframe*, void*);
-
- if(gp->syscallsp != (uintptr)nil) {
- sp = gp->syscallsp;
- pc = gp->syscallpc;
- lr = 0;
- } else {
- sp = gp->sched.sp;
- pc = gp->sched.pc;
- lr = gp->sched.lr;
- }
-
- dumpint(TagGoRoutine);
- dumpint((uintptr)gp);
- dumpint((uintptr)sp);
- dumpint(gp->goid);
- dumpint(gp->gopc);
- dumpint(runtime·readgstatus(gp));
- dumpbool(gp->issystem);
- dumpbool(false); // isbackground
- dumpint(gp->waitsince);
- dumpstr(gp->waitreason);
- dumpint((uintptr)gp->sched.ctxt);
- dumpint((uintptr)gp->m);
- dumpint((uintptr)gp->defer);
- dumpint((uintptr)gp->panic);
-
- // dump stack
- child.args.n = -1;
- child.arglen = 0;
- child.sp = nil;
- child.depth = 0;
- fn = dumpframe;
- runtime·gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, &fn, &child, 0);
-
- // dump defer & panic records
- for(d = gp->defer; d != nil; d = d->link) {
- dumpint(TagDefer);
- dumpint((uintptr)d);
- dumpint((uintptr)gp);
- dumpint((uintptr)d->argp);
- dumpint((uintptr)d->pc);
- dumpint((uintptr)d->fn);
- dumpint((uintptr)d->fn->fn);
- dumpint((uintptr)d->link);
- }
- for (p = gp->panic; p != nil; p = p->link) {
- dumpint(TagPanic);
- dumpint((uintptr)p);
- dumpint((uintptr)gp);
- dumpint((uintptr)p->arg.type);
- dumpint((uintptr)p->arg.data);
- dumpint(0); // was p->defer, no longer recorded
- dumpint((uintptr)p->link);
- }
-}
-
-static void
-dumpgs(void)
-{
- G *gp;
- uint32 i;
- uint32 status;
-
- // goroutines & stacks
- for(i = 0; i < runtime·allglen; i++) {
- gp = runtime·allg[i];
- status = runtime·readgstatus(gp); // The world is stopped so gp will not be in a scan state.
- switch(status){
- default:
- runtime·printf("runtime: unexpected G.status %d\n", status);
- runtime·throw("dumpgs in STW - bad status");
- case Gdead:
- break;
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
- dumpgoroutine(gp);
- break;
- }
- }
-}
-
-static void
-finq_callback(FuncVal *fn, byte *obj, uintptr nret, Type *fint, PtrType *ot)
-{
- dumpint(TagQueuedFinalizer);
- dumpint((uintptr)obj);
- dumpint((uintptr)fn);
- dumpint((uintptr)fn->fn);
- dumpint((uintptr)fint);
- dumpint((uintptr)ot);
- USED(&nret);
-}
-
-
-static void
-dumproots(void)
-{
- MSpan *s, **allspans;
- uint32 spanidx;
- Special *sp;
- SpecialFinalizer *spf;
- byte *p;
-
- // data segment
- dumpbvtypes(&runtime·gcdatamask, runtime·data);
- dumpint(TagData);
- dumpint((uintptr)runtime·data);
- dumpmemrange(runtime·data, runtime·edata - runtime·data);
- dumpfields(runtime·gcdatamask);
-
- // bss segment
- dumpbvtypes(&runtime·gcbssmask, runtime·bss);
- dumpint(TagBss);
- dumpint((uintptr)runtime·bss);
- dumpmemrange(runtime·bss, runtime·ebss - runtime·bss);
- dumpfields(runtime·gcbssmask);
-
- // MSpan.types
- allspans = runtime·mheap.allspans;
- for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
- s = allspans[spanidx];
- if(s->state == MSpanInUse) {
- // Finalizers
- for(sp = s->specials; sp != nil; sp = sp->next) {
- if(sp->kind != KindSpecialFinalizer)
- continue;
- spf = (SpecialFinalizer*)sp;
- p = (byte*)((s->start << PageShift) + spf->special.offset);
- dumpfinalizer(p, spf->fn, spf->fint, spf->ot);
- }
- }
- }
-
- // Finalizer queue
- runtime·iterate_finq(finq_callback);
-}
-
-// Bit vector of free marks.
-// Needs to be as big as the largest number of objects per span.
-#pragma dataflag NOPTR
-static byte free[PageSize/8];
-
-static void
-dumpobjs(void)
-{
- uintptr i, j, size, n;
- MSpan *s;
- MLink *l;
- byte *p;
-
- for(i = 0; i < runtime·mheap.nspan; i++) {
- s = runtime·mheap.allspans[i];
- if(s->state != MSpanInUse)
- continue;
- p = (byte*)(s->start << PageShift);
- size = s->elemsize;
- n = (s->npages << PageShift) / size;
- if(n > nelem(free))
- runtime·throw("free array doesn't have enough entries");
- for(l = s->freelist; l != nil; l = l->next)
- free[((byte*)l - p) / size] = true;
- for(j = 0; j < n; j++, p += size) {
- if(free[j]) {
- free[j] = false;
- continue;
- }
- dumpobj(p, size, makeheapobjbv(p, size));
- }
- }
-}
-
-static void
-dumpparams(void)
-{
- byte *x;
-
- dumpint(TagParams);
- x = (byte*)1;
- if(*(byte*)&x == 1)
- dumpbool(false); // little-endian ptrs
- else
- dumpbool(true); // big-endian ptrs
- dumpint(PtrSize);
- dumpint((uintptr)runtime·mheap.arena_start);
- dumpint((uintptr)runtime·mheap.arena_used);
- dumpint(thechar);
- dumpcstr(GOEXPERIMENT);
- dumpint(runtime·ncpu);
-}
-
-static void
-itab_callback(Itab *tab)
-{
- Type *t;
-
- t = tab->type;
- // Dump a map from itab* to the type of its data field.
- // We want this map so we can deduce types of interface referents.
- if((t->kind & KindDirectIface) == 0) {
- // indirect - data slot is a pointer to t.
- dumptype(t->ptrto);
- dumpint(TagItab);
- dumpint((uintptr)tab);
- dumpint((uintptr)t->ptrto);
- } else if((t->kind & KindNoPointers) == 0) {
- // t is pointer-like - data slot is a t.
- dumptype(t);
- dumpint(TagItab);
- dumpint((uintptr)tab);
- dumpint((uintptr)t);
- } else {
- // Data slot is a scalar. Dump type just for fun.
- // With pointer-only interfaces, this shouldn't happen.
- dumptype(t);
- dumpint(TagItab);
- dumpint((uintptr)tab);
- dumpint((uintptr)t);
- }
-}
-
-static void
-dumpitabs(void)
-{
- void (*fn)(Itab*);
-
- fn = itab_callback;
- runtime·iterate_itabs(&fn);
-}
-
-static void
-dumpms(void)
-{
- M *mp;
-
- for(mp = runtime·allm; mp != nil; mp = mp->alllink) {
- dumpint(TagOSThread);
- dumpint((uintptr)mp);
- dumpint(mp->id);
- dumpint(mp->procid);
- }
-}
-
-static void
-dumpmemstats(void)
-{
- int32 i;
-
- dumpint(TagMemStats);
- dumpint(mstats.alloc);
- dumpint(mstats.total_alloc);
- dumpint(mstats.sys);
- dumpint(mstats.nlookup);
- dumpint(mstats.nmalloc);
- dumpint(mstats.nfree);
- dumpint(mstats.heap_alloc);
- dumpint(mstats.heap_sys);
- dumpint(mstats.heap_idle);
- dumpint(mstats.heap_inuse);
- dumpint(mstats.heap_released);
- dumpint(mstats.heap_objects);
- dumpint(mstats.stacks_inuse);
- dumpint(mstats.stacks_sys);
- dumpint(mstats.mspan_inuse);
- dumpint(mstats.mspan_sys);
- dumpint(mstats.mcache_inuse);
- dumpint(mstats.mcache_sys);
- dumpint(mstats.buckhash_sys);
- dumpint(mstats.gc_sys);
- dumpint(mstats.other_sys);
- dumpint(mstats.next_gc);
- dumpint(mstats.last_gc);
- dumpint(mstats.pause_total_ns);
- for(i = 0; i < 256; i++)
- dumpint(mstats.pause_ns[i]);
- dumpint(mstats.numgc);
-}
-
-static void
-dumpmemprof_callback(Bucket *b, uintptr nstk, uintptr *stk, uintptr size, uintptr allocs, uintptr frees)
-{
- uintptr i, pc;
- Func *f;
- byte buf[20];
- String file;
- int32 line;
-
- dumpint(TagMemProf);
- dumpint((uintptr)b);
- dumpint(size);
- dumpint(nstk);
- for(i = 0; i < nstk; i++) {
- pc = stk[i];
- f = runtime·findfunc(pc);
- if(f == nil) {
- runtime·snprintf(buf, sizeof(buf), "%X", (uint64)pc);
- dumpcstr((int8*)buf);
- dumpcstr("?");
- dumpint(0);
- } else {
- dumpcstr(runtime·funcname(f));
- // TODO: Why do we need to back up to a call instruction here?
- // Maybe profiler should do this.
- if(i > 0 && pc > f->entry) {
- if(thechar == '6' || thechar == '8')
- pc--;
- else
- pc -= 4; // arm, etc
- }
- line = runtime·funcline(f, pc, &file);
- dumpstr(file);
- dumpint(line);
- }
- }
- dumpint(allocs);
- dumpint(frees);
-}
-
-static void
-dumpmemprof(void)
-{
- MSpan *s, **allspans;
- uint32 spanidx;
- Special *sp;
- SpecialProfile *spp;
- byte *p;
- void (*fn)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr);
-
- fn = dumpmemprof_callback;
- runtime·iterate_memprof(&fn);
-
- allspans = runtime·mheap.allspans;
- for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) {
- s = allspans[spanidx];
- if(s->state != MSpanInUse)
- continue;
- for(sp = s->specials; sp != nil; sp = sp->next) {
- if(sp->kind != KindSpecialProfile)
- continue;
- spp = (SpecialProfile*)sp;
- p = (byte*)((s->start << PageShift) + spp->special.offset);
- dumpint(TagAllocSample);
- dumpint((uintptr)p);
- dumpint((uintptr)spp->b);
- }
- }
-}
-
-static void
-mdump(void)
-{
- byte *hdr;
- uintptr i;
- MSpan *s;
-
- // make sure we're done sweeping
- for(i = 0; i < runtime·mheap.nspan; i++) {
- s = runtime·mheap.allspans[i];
- if(s->state == MSpanInUse)
- runtime·MSpan_EnsureSwept(s);
- }
-
- runtime·memclr((byte*)&typecache[0], sizeof(typecache));
- hdr = (byte*)"go1.4 heap dump\n";
- write(hdr, runtime·findnull(hdr));
- dumpparams();
- dumpitabs();
- dumpobjs();
- dumpgs();
- dumpms();
- dumproots();
- dumpmemstats();
- dumpmemprof();
- dumpint(TagEOF);
- flush();
-}
-
-void
-runtime·writeheapdump_m(void)
-{
- uintptr fd;
-
- fd = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
-
- runtime·casgstatus(g->m->curg, Grunning, Gwaiting);
- g->waitreason = runtime·gostringnocopy((byte*)"dumping heap");
-
- // Update stats so we can dump them.
- // As a side effect, flushes all the MCaches so the MSpan.freelist
- // lists contain all the free objects.
- runtime·updatememstats(nil);
-
- // Set dump file.
- dumpfd = fd;
-
- // Call dump routine.
- mdump();
-
- // Reset dump file.
- dumpfd = 0;
- if(tmpbuf != nil) {
- runtime·SysFree(tmpbuf, tmpbufsize, &mstats.other_sys);
- tmpbuf = nil;
- tmpbufsize = 0;
- }
-
- runtime·casgstatus(g->m->curg, Gwaiting, Grunning);
-}
-
-// dumpint() the kind & offset of each field in an object.
-static void
-dumpfields(BitVector bv)
-{
- dumpbv(&bv, 0);
- dumpint(FieldKindEol);
-}
-
-// The heap dump reader needs to be able to disambiguate
-// Eface entries. So it needs to know every type that might
-// appear in such an entry. The following routine accomplishes that.
-
-// Dump all the types that appear in the type field of
-// any Eface described by this bit vector.
-static void
-dumpbvtypes(BitVector *bv, byte *base)
-{
- uintptr i;
-
- for(i = 0; i < bv->n; i += BitsPerPointer) {
- if((bv->bytedata[i/8] >> i%8 & 3) != BitsMultiWord)
- continue;
- switch(bv->bytedata[(i+BitsPerPointer)/8] >> (i+BitsPerPointer)%8 & 3) {
- default:
- runtime·throw("unexpected garbage collection bits");
- case BitsIface:
- i += BitsPerPointer;
- break;
- case BitsEface:
- dumptype(*(Type**)(base + i / BitsPerPointer * PtrSize));
- i += BitsPerPointer;
- break;
- }
- }
-}
-
-static BitVector
-makeheapobjbv(byte *p, uintptr size)
-{
- uintptr off, nptr, i;
- byte shift, *bitp, bits;
- bool mw;
-
- // Extend the temp buffer if necessary.
- nptr = size/PtrSize;
- if(tmpbufsize < nptr*BitsPerPointer/8+1) {
- if(tmpbuf != nil)
- runtime·SysFree(tmpbuf, tmpbufsize, &mstats.other_sys);
- tmpbufsize = nptr*BitsPerPointer/8+1;
- tmpbuf = runtime·sysAlloc(tmpbufsize, &mstats.other_sys);
- if(tmpbuf == nil)
- runtime·throw("heapdump: out of memory");
- }
-
- // Copy and compact the bitmap.
- mw = false;
- for(i = 0; i < nptr; i++) {
- off = (uintptr*)(p + i*PtrSize) - (uintptr*)runtime·mheap.arena_start;
- bitp = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- bits = (*bitp >> (shift + 2)) & BitsMask;
- if(!mw && bits == BitsDead)
- break; // end of heap object
- mw = !mw && bits == BitsMultiWord;
- tmpbuf[i*BitsPerPointer/8] &= ~(BitsMask<<((i*BitsPerPointer)%8));
- tmpbuf[i*BitsPerPointer/8] |= bits<<((i*BitsPerPointer)%8);
- }
- return (BitVector){i*BitsPerPointer, tmpbuf};
-}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
new file mode 100644
index 000000000..c942e0163
--- /dev/null
+++ b/src/runtime/heapdump.go
@@ -0,0 +1,729 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of runtime/debug.WriteHeapDump. Writes all
+// objects in the heap plus additional info (roots, threads,
+// finalizers, etc.) to a file.
+
+// The format of the dumped file is described at
+// http://code.google.com/p/go-wiki/wiki/heapdump14
+
+package runtime
+
+import "unsafe"
+
+const (
+ fieldKindEol = 0
+ fieldKindPtr = 1
+ fieldKindIface = 2
+ fieldKindEface = 3
+ tagEOF = 0
+ tagObject = 1
+ tagOtherRoot = 2
+ tagType = 3
+ tagGoroutine = 4
+ tagStackFrame = 5
+ tagParams = 6
+ tagFinalizer = 7
+ tagItab = 8
+ tagOSThread = 9
+ tagMemStats = 10
+ tagQueuedFinalizer = 11
+ tagData = 12
+ tagBSS = 13
+ tagDefer = 14
+ tagPanic = 15
+ tagMemProf = 16
+ tagAllocSample = 17
+)
+
+var dumpfd uintptr // fd to write the dump to.
+var tmpbuf []byte
+
+// buffer of pending write data
+const (
+ bufSize = 4096
+)
+
+var buf [bufSize]byte
+var nbuf uintptr
+
+func dwrite(data unsafe.Pointer, len uintptr) {
+ if len == 0 {
+ return
+ }
+ if nbuf+len <= bufSize {
+ copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
+ nbuf += len
+ return
+ }
+
+ write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
+ if len >= bufSize {
+ write(dumpfd, data, int32(len))
+ nbuf = 0
+ } else {
+ copy(buf[:], (*[bufSize]byte)(data)[:len])
+ nbuf = len
+ }
+}
+
+func dwritebyte(b byte) {
+ dwrite(unsafe.Pointer(&b), 1)
+}
+
+func flush() {
+ write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
+ nbuf = 0
+}
+
+// Cache of types that have been serialized already.
+// We use a type's hash field to pick a bucket.
+// Inside a bucket, we keep a list of types that
+// have been serialized so far, most recently used first.
+// Note: when a bucket overflows we may end up
+// serializing a type more than once. That's ok.
+const (
+ typeCacheBuckets = 256
+ typeCacheAssoc = 4
+)
+
+type typeCacheBucket struct {
+ t [typeCacheAssoc]*_type
+}
+
+var typecache [typeCacheBuckets]typeCacheBucket
+
+// dump a uint64 in a varint format parseable by encoding/binary
+func dumpint(v uint64) {
+ var buf [10]byte
+ var n int
+ for v >= 0x80 {
+ buf[n] = byte(v | 0x80)
+ n++
+ v >>= 7
+ }
+ buf[n] = byte(v)
+ n++
+ dwrite(unsafe.Pointer(&buf), uintptr(n))
+}
+
+func dumpbool(b bool) {
+ if b {
+ dumpint(1)
+ } else {
+ dumpint(0)
+ }
+}
+
+// dump varint uint64 length followed by memory contents
+func dumpmemrange(data unsafe.Pointer, len uintptr) {
+ dumpint(uint64(len))
+ dwrite(data, len)
+}
+
+func dumpslice(b []byte) {
+ dumpint(uint64(len(b)))
+ if len(b) > 0 {
+ dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
+ }
+}
+
+func dumpstr(s string) {
+ sp := (*stringStruct)(unsafe.Pointer(&s))
+ dumpmemrange(sp.str, uintptr(sp.len))
+}
+
+// dump information for a type
+func dumptype(t *_type) {
+ if t == nil {
+ return
+ }
+
+ // If we've definitely serialized the type before,
+ // no need to do it again.
+ b := &typecache[t.hash&(typeCacheBuckets-1)]
+ if t == b.t[0] {
+ return
+ }
+ for i := 1; i < typeCacheAssoc; i++ {
+ if t == b.t[i] {
+ // Move-to-front
+ for j := i; j > 0; j-- {
+ b.t[j] = b.t[j-1]
+ }
+ b.t[0] = t
+ return
+ }
+ }
+
+ // Might not have been dumped yet. Dump it and
+ // remember we did so.
+ for j := typeCacheAssoc - 1; j > 0; j-- {
+ b.t[j] = b.t[j-1]
+ }
+ b.t[0] = t
+
+ // dump the type
+ dumpint(tagType)
+ dumpint(uint64(uintptr(unsafe.Pointer(t))))
+ dumpint(uint64(t.size))
+ if t.x == nil || t.x.pkgpath == nil || t.x.name == nil {
+ dumpstr(*t._string)
+ } else {
+ pkgpath := (*stringStruct)(unsafe.Pointer(t.x.pkgpath))
+ name := (*stringStruct)(unsafe.Pointer(t.x.name))
+ dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
+ dwrite(pkgpath.str, uintptr(pkgpath.len))
+ dwritebyte('.')
+ dwrite(name.str, uintptr(name.len))
+ }
+ dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
+}
+
+// dump an object
+func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
+ dumpbvtypes(&bv, obj)
+ dumpint(tagObject)
+ dumpint(uint64(uintptr(obj)))
+ dumpmemrange(obj, size)
+ dumpfields(bv)
+}
+
+func dumpotherroot(description string, to unsafe.Pointer) {
+ dumpint(tagOtherRoot)
+ dumpstr(description)
+ dumpint(uint64(uintptr(to)))
+}
+
+func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
+ dumpint(tagFinalizer)
+ dumpint(uint64(uintptr(obj)))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fint))))
+ dumpint(uint64(uintptr(unsafe.Pointer(ot))))
+}
+
+type childInfo struct {
+ // Information passed up from the callee frame about
+ // the layout of the outargs region.
+ argoff uintptr // where the arguments start in the frame
+ arglen uintptr // size of args region
+ args bitvector // if args.n >= 0, pointer map of args region
+ sp *uint8 // callee sp
+ depth uintptr // depth in call stack (0 == most recent)
+}
+
+// dump kinds & offsets of interesting fields in bv
+func dumpbv(cbv *bitvector, offset uintptr) {
+ bv := gobv(*cbv)
+ for i := uintptr(0); i < uintptr(bv.n); i += _BitsPerPointer {
+ switch bv.bytedata[i/8] >> (i % 8) & 3 {
+ default:
+ gothrow("unexpected pointer bits")
+ case _BitsDead:
+ // BitsDead has already been processed in makeheapobjbv.
+ // We should only see it in stack maps, in which case we should continue processing.
+ case _BitsScalar:
+ // ok
+ case _BitsPointer:
+ dumpint(fieldKindPtr)
+ dumpint(uint64(offset + i/_BitsPerPointer*ptrSize))
+ }
+ }
+}
+
+func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
+ child := (*childInfo)(arg)
+ f := s.fn
+
+ // Figure out what we can about our stack map
+ pc := s.pc
+ if pc != f.entry {
+ pc--
+ }
+ pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc)
+ if pcdata == -1 {
+ // We do not have a valid pcdata value but there might be a
+ // stackmap for this function. It is likely that we are looking
+ // at the function prologue, assume so and hope for the best.
+ pcdata = 0
+ }
+ stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+
+ // Dump any types we will need to resolve Efaces.
+ if child.args.n >= 0 {
+ dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
+ }
+ var bv bitvector
+ if stkmap != nil && stkmap.n > 0 {
+ bv = stackmapdata(stkmap, pcdata)
+ dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n/_BitsPerPointer*ptrSize)))
+ } else {
+ bv.n = -1
+ }
+
+ // Dump main body of stack frame.
+ dumpint(tagStackFrame)
+ dumpint(uint64(s.sp)) // lowest address in frame
+ dumpint(uint64(child.depth)) // # of frames deep on the stack
+ dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
+ dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp) // frame contents
+ dumpint(uint64(f.entry))
+ dumpint(uint64(s.pc))
+ dumpint(uint64(s.continpc))
+ name := gofuncname(f)
+ if name == "" {
+ name = "unknown function"
+ }
+ dumpstr(name)
+
+ // Dump fields in the outargs section
+ if child.args.n >= 0 {
+ dumpbv(&child.args, child.argoff)
+ } else {
+ // conservative - everything might be a pointer
+ for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(off))
+ }
+ }
+
+ // Dump fields in the local vars section
+ if stkmap == nil {
+ // No locals information, dump everything.
+ for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(off))
+ }
+ } else if stkmap.n < 0 {
+ // Locals size information, dump just the locals.
+ size := uintptr(-stkmap.n)
+ for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
+ dumpint(fieldKindPtr)
+ dumpint(uint64(off))
+ }
+ } else if stkmap.n > 0 {
+ // Locals bitmap information, scan just the pointers in
+ // locals.
+ dumpbv(&bv, s.varp-uintptr(bv.n)/_BitsPerPointer*ptrSize-s.sp)
+ }
+ dumpint(fieldKindEol)
+
+ // Record arg info for parent.
+ child.argoff = s.argp - s.fp
+ child.arglen = s.arglen
+ child.sp = (*uint8)(unsafe.Pointer(s.sp))
+ child.depth++
+ stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
+ if stkmap != nil {
+ child.args = stackmapdata(stkmap, pcdata)
+ } else {
+ child.args.n = -1
+ }
+ return true
+}
+
+func dumpgoroutine(gp *g) {
+ var sp, pc, lr uintptr
+ if gp.syscallsp != 0 {
+ sp = gp.syscallsp
+ pc = gp.syscallpc
+ lr = 0
+ } else {
+ sp = gp.sched.sp
+ pc = gp.sched.pc
+ lr = gp.sched.lr
+ }
+
+ dumpint(tagGoroutine)
+ dumpint(uint64(uintptr(unsafe.Pointer(gp))))
+ dumpint(uint64(sp))
+ dumpint(uint64(gp.goid))
+ dumpint(uint64(gp.gopc))
+ dumpint(uint64(readgstatus(gp)))
+ dumpbool(gp.issystem)
+ dumpbool(false) // isbackground
+ dumpint(uint64(gp.waitsince))
+ dumpstr(gp.waitreason)
+ dumpint(uint64(uintptr(gp.sched.ctxt)))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
+
+ // dump stack
+ var child childInfo
+ child.args.n = -1
+ child.arglen = 0
+ child.sp = nil
+ child.depth = 0
+ gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)
+
+ // dump defer & panic records
+ for d := gp._defer; d != nil; d = d.link {
+ dumpint(tagDefer)
+ dumpint(uint64(uintptr(unsafe.Pointer(d))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp))))
+ dumpint(uint64(d.argp))
+ dumpint(uint64(d.pc))
+ dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
+ }
+ for p := gp._panic; p != nil; p = p.link {
+ dumpint(tagPanic)
+ dumpint(uint64(uintptr(unsafe.Pointer(p))))
+ dumpint(uint64(uintptr(unsafe.Pointer(gp))))
+ eface := (*eface)(unsafe.Pointer(&p.arg))
+ dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
+ dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
+ dumpint(0) // was p->defer, no longer recorded
+ dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
+ }
+}
+
+func dumpgs() {
+ // goroutines & stacks
+ for i := 0; uintptr(i) < allglen; i++ {
+ gp := allgs[i]
+ status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
+ switch status {
+ default:
+ print("runtime: unexpected G.status ", hex(status), "\n")
+ gothrow("dumpgs in STW - bad status")
+ case _Gdead:
+ // ok
+ case _Grunnable,
+ _Gsyscall,
+ _Gwaiting:
+ dumpgoroutine(gp)
+ }
+ }
+}
+
+func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
+ dumpint(tagQueuedFinalizer)
+ dumpint(uint64(uintptr(obj)))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
+ dumpint(uint64(uintptr(unsafe.Pointer(fint))))
+ dumpint(uint64(uintptr(unsafe.Pointer(ot))))
+}
+
+func dumproots() {
+ // data segment
+ dumpbvtypes(&gcdatamask, unsafe.Pointer(&data))
+ dumpint(tagData)
+ dumpint(uint64(uintptr(unsafe.Pointer(&data))))
+ dumpmemrange(unsafe.Pointer(&data), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
+ dumpfields(gcdatamask)
+
+ // bss segment
+ dumpbvtypes(&gcbssmask, unsafe.Pointer(&bss))
+ dumpint(tagBSS)
+ dumpint(uint64(uintptr(unsafe.Pointer(&bss))))
+ dumpmemrange(unsafe.Pointer(&bss), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
+ dumpfields(gcbssmask)
+
+ // MSpan.types
+ allspans := h_allspans
+ for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
+ s := allspans[spanidx]
+ if s.state == _MSpanInUse {
+ // Finalizers
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialFinalizer {
+ continue
+ }
+ spf := (*specialfinalizer)(unsafe.Pointer(sp))
+ p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
+ dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
+ }
+ }
+ }
+
+ // Finalizer queue
+ iterate_finq(finq_callback)
+}
+
+// Bit vector of free marks.
+// Needs to be as big as the largest number of objects per span.
+var freemark [_PageSize / 8]bool
+
+func dumpobjs() {
+ for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
+ s := h_allspans[i]
+ if s.state != _MSpanInUse {
+ continue
+ }
+ p := uintptr(s.start << _PageShift)
+ size := s.elemsize
+ n := (s.npages << _PageShift) / size
+ if n > uintptr(len(freemark)) {
+ gothrow("freemark array doesn't have enough entries")
+ }
+ for l := s.freelist; l != nil; l = l.next {
+ freemark[(uintptr(unsafe.Pointer(l))-p)/size] = true
+ }
+ for j := uintptr(0); j < n; j, p = j+1, p+size {
+ if freemark[j] {
+ freemark[j] = false
+ continue
+ }
+ dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
+ }
+ }
+}
+
+func dumpparams() {
+ dumpint(tagParams)
+ x := uintptr(1)
+ if *(*byte)(unsafe.Pointer(&x)) == 1 {
+ dumpbool(false) // little-endian ptrs
+ } else {
+ dumpbool(true) // big-endian ptrs
+ }
+ dumpint(ptrSize)
+ dumpint(uint64(mheap_.arena_start))
+ dumpint(uint64(mheap_.arena_used))
+ dumpint(thechar)
+ dumpstr(goexperiment)
+ dumpint(uint64(ncpu))
+}
+
+func itab_callback(tab *itab) {
+ t := tab._type
+ // Dump a map from itab* to the type of its data field.
+ // We want this map so we can deduce types of interface referents.
+ if t.kind&kindDirectIface == 0 {
+ // indirect - data slot is a pointer to t.
+ dumptype(t.ptrto)
+ dumpint(tagItab)
+ dumpint(uint64(uintptr(unsafe.Pointer(tab))))
+ dumpint(uint64(uintptr(unsafe.Pointer(t.ptrto))))
+ } else if t.kind&kindNoPointers == 0 {
+ // t is pointer-like - data slot is a t.
+ dumptype(t)
+ dumpint(tagItab)
+ dumpint(uint64(uintptr(unsafe.Pointer(tab))))
+ dumpint(uint64(uintptr(unsafe.Pointer(t))))
+ } else {
+ // Data slot is a scalar. Dump type just for fun.
+ // With pointer-only interfaces, this shouldn't happen.
+ dumptype(t)
+ dumpint(tagItab)
+ dumpint(uint64(uintptr(unsafe.Pointer(tab))))
+ dumpint(uint64(uintptr(unsafe.Pointer(t))))
+ }
+}
+
+func dumpitabs() {
+ iterate_itabs(itab_callback)
+}
+
+func dumpms() {
+ for mp := allm; mp != nil; mp = mp.alllink {
+ dumpint(tagOSThread)
+ dumpint(uint64(uintptr(unsafe.Pointer(mp))))
+ dumpint(uint64(mp.id))
+ dumpint(mp.procid)
+ }
+}
+
+func dumpmemstats() {
+ dumpint(tagMemStats)
+ dumpint(memstats.alloc)
+ dumpint(memstats.total_alloc)
+ dumpint(memstats.sys)
+ dumpint(memstats.nlookup)
+ dumpint(memstats.nmalloc)
+ dumpint(memstats.nfree)
+ dumpint(memstats.heap_alloc)
+ dumpint(memstats.heap_sys)
+ dumpint(memstats.heap_idle)
+ dumpint(memstats.heap_inuse)
+ dumpint(memstats.heap_released)
+ dumpint(memstats.heap_objects)
+ dumpint(memstats.stacks_inuse)
+ dumpint(memstats.stacks_sys)
+ dumpint(memstats.mspan_inuse)
+ dumpint(memstats.mspan_sys)
+ dumpint(memstats.mcache_inuse)
+ dumpint(memstats.mcache_sys)
+ dumpint(memstats.buckhash_sys)
+ dumpint(memstats.gc_sys)
+ dumpint(memstats.other_sys)
+ dumpint(memstats.next_gc)
+ dumpint(memstats.last_gc)
+ dumpint(memstats.pause_total_ns)
+ for i := 0; i < 256; i++ {
+ dumpint(memstats.pause_ns[i])
+ }
+ dumpint(uint64(memstats.numgc))
+}
+
+func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
+ stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
+ dumpint(tagMemProf)
+ dumpint(uint64(uintptr(unsafe.Pointer(b))))
+ dumpint(uint64(size))
+ dumpint(uint64(nstk))
+ for i := uintptr(0); i < nstk; i++ {
+ pc := stk[i]
+ f := findfunc(pc)
+ if f == nil {
+ var buf [64]byte
+ n := len(buf)
+ n--
+ buf[n] = ')'
+ if pc == 0 {
+ n--
+ buf[n] = '0'
+ } else {
+ for pc > 0 {
+ n--
+ buf[n] = "0123456789abcdef"[pc&15]
+ pc >>= 4
+ }
+ }
+ n--
+ buf[n] = 'x'
+ n--
+ buf[n] = '0'
+ n--
+ buf[n] = '('
+ dumpslice(buf[n:])
+ dumpstr("?")
+ dumpint(0)
+ } else {
+ dumpstr(gofuncname(f))
+ if i > 0 && pc > f.entry {
+ pc--
+ }
+ file, line := funcline(f, pc)
+ dumpstr(file)
+ dumpint(uint64(line))
+ }
+ }
+ dumpint(uint64(allocs))
+ dumpint(uint64(frees))
+}
+
+func dumpmemprof() {
+ iterate_memprof(dumpmemprof_callback)
+ allspans := h_allspans
+ for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
+ s := allspans[spanidx]
+ if s.state != _MSpanInUse {
+ continue
+ }
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialProfile {
+ continue
+ }
+ spp := (*specialprofile)(unsafe.Pointer(sp))
+ p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
+ dumpint(tagAllocSample)
+ dumpint(uint64(p))
+ dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
+ }
+ }
+}
+
+var dumphdr = []byte("go1.4 heap dump\n")
+
+func mdump() {
+ // make sure we're done sweeping
+ for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
+ s := h_allspans[i]
+ if s.state == _MSpanInUse {
+ mSpan_EnsureSwept(s)
+ }
+ }
+ memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
+ dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
+ dumpparams()
+ dumpitabs()
+ dumpobjs()
+ dumpgs()
+ dumpms()
+ dumproots()
+ dumpmemstats()
+ dumpmemprof()
+ dumpint(tagEOF)
+ flush()
+}
+
+func writeheapdump_m(fd uintptr) {
+ _g_ := getg()
+ casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
+ _g_.waitreason = "dumping heap"
+
+ // Update stats so we can dump them.
+ // As a side effect, flushes all the MCaches so the MSpan.freelist
+ // lists contain all the free objects.
+ updatememstats(nil)
+
+ // Set dump file.
+ dumpfd = fd
+
+ // Call dump routine.
+ mdump()
+
+ // Reset dump file.
+ dumpfd = 0
+ if tmpbuf != nil {
+ sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
+ tmpbuf = nil
+ }
+
+ casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
+}
+
+// dumpint() the kind & offset of each field in an object.
+func dumpfields(bv bitvector) {
+ dumpbv(&bv, 0)
+ dumpint(fieldKindEol)
+}
+
+// The heap dump reader needs to be able to disambiguate
+// Eface entries. So it needs to know every type that might
+// appear in such an entry. The following routine accomplishes that.
+// TODO(rsc, khr): Delete - no longer possible.
+
+// Dump all the types that appear in the type field of
+// any Eface described by this bit vector.
+func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
+}
+
+func makeheapobjbv(p uintptr, size uintptr) bitvector {
+ // Extend the temp buffer if necessary.
+ nptr := size / ptrSize
+ if uintptr(len(tmpbuf)) < nptr*_BitsPerPointer/8+1 {
+ if tmpbuf != nil {
+ sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
+ }
+ n := nptr*_BitsPerPointer/8 + 1
+ p := sysAlloc(n, &memstats.other_sys)
+ if p == nil {
+ gothrow("heapdump: out of memory")
+ }
+ tmpbuf = (*[1 << 30]byte)(p)[:n]
+ }
+ // Copy and compact the bitmap.
+ var i uintptr
+ for i = 0; i < nptr; i++ {
+ off := (p + i*ptrSize - mheap_.arena_start) / ptrSize
+ bitp := (*uint8)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
+ shift := uint8((off % wordsPerBitmapByte) * gcBits)
+ bits := (*bitp >> (shift + 2)) & _BitsMask
+ if bits == _BitsDead {
+ break // end of heap object
+ }
+ tmpbuf[i*_BitsPerPointer/8] &^= (_BitsMask << ((i * _BitsPerPointer) % 8))
+ tmpbuf[i*_BitsPerPointer/8] |= bits << ((i * _BitsPerPointer) % 8)
+ }
+ return bitvector{int32(i * _BitsPerPointer), &tmpbuf[0]}
+}
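
The Go dump writer above keeps the C writer's wire format: dumpint emits a base-128 varint that the comment describes as parseable by encoding/binary. A minimal standalone sketch (not part of this commit; dumpintBytes and main are invented for illustration) round-trips one value through the standard library's uvarint reader:

    // varintdemo.go - standalone sketch; dumpintBytes mirrors dumpint's encoding.
    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    )

    // dumpintBytes encodes v the way dumpint does: 7 bits per byte,
    // high bit set on every byte except the last.
    func dumpintBytes(v uint64) []byte {
    	var buf [10]byte
    	n := 0
    	for v >= 0x80 {
    		buf[n] = byte(v | 0x80)
    		n++
    		v >>= 7
    	}
    	buf[n] = byte(v)
    	n++
    	return buf[:n]
    }

    func main() {
    	enc := dumpintBytes(1000000)
    	v, err := binary.ReadUvarint(bytes.NewReader(enc))
    	fmt.Println(v, err) // 1000000 <nil>
    }
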
diff --git a/src/runtime/lfstack.c b/src/runtime/lfstack.c
deleted file mode 100644
index 0ced839c2..000000000
--- a/src/runtime/lfstack.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Lock-free stack.
-// The following code runs only on g0 stack.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-
-#ifdef _64BIT
-// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
-// So we use 17msb of pointers as ABA counter.
-# define PTR_BITS 47
-#else
-# define PTR_BITS 32
-#endif
-#define PTR_MASK ((1ull<<PTR_BITS)-1)
-#define CNT_MASK (0ull-1)
-
-#ifdef _64BIT
-#ifdef GOOS_solaris
-// SPARC64 and Solaris on AMD64 uses all 64 bits of virtual addresses.
-// Use low-order three bits as ABA counter.
-// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
-#undef PTR_BITS
-#undef CNT_MASK
-#undef PTR_MASK
-#define PTR_BITS 0
-#define CNT_MASK 7
-#define PTR_MASK ((0ull-1)<<3)
-#endif
-#endif
-
-void
-runtime·lfstackpush(uint64 *head, LFNode *node)
-{
- uint64 old, new;
-
- if((uintptr)node != ((uintptr)node&PTR_MASK)) {
- runtime·printf("p=%p\n", node);
- runtime·throw("runtime·lfstackpush: invalid pointer");
- }
-
- node->pushcnt++;
- new = (uint64)(uintptr)node|(((uint64)node->pushcnt&CNT_MASK)<<PTR_BITS);
- for(;;) {
- old = runtime·atomicload64(head);
- node->next = old;
- if(runtime·cas64(head, old, new))
- break;
- }
-}
-
-LFNode*
-runtime·lfstackpop(uint64 *head)
-{
- LFNode *node;
- uint64 old, next;
-
- for(;;) {
- old = runtime·atomicload64(head);
- if(old == 0)
- return nil;
- node = (LFNode*)(uintptr)(old&PTR_MASK);
- next = runtime·atomicload64(&node->next);
-
- if(runtime·cas64(head, old, next))
- return node;
- }
-}
-
-void
-runtime·lfstackpush_m(void)
-{
- runtime·lfstackpush(g->m->ptrarg[0], g->m->ptrarg[1]);
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
-}
-
-void
-runtime·lfstackpop_m(void)
-{
- g->m->ptrarg[0] = runtime·lfstackpop(g->m->ptrarg[0]);
-}
diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go
new file mode 100644
index 000000000..a4ad8a10c
--- /dev/null
+++ b/src/runtime/lfstack.go
@@ -0,0 +1,36 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lock-free stack.
+// The following code runs only on g0 stack.
+
+package runtime
+
+import "unsafe"
+
+func lfstackpush(head *uint64, node *lfnode) {
+ node.pushcnt++
+ new := lfstackPack(node, node.pushcnt)
+ for {
+ old := atomicload64(head)
+ node.next = old
+ if cas64(head, old, new) {
+ break
+ }
+ }
+}
+
+func lfstackpop(head *uint64) unsafe.Pointer {
+ for {
+ old := atomicload64(head)
+ if old == 0 {
+ return nil
+ }
+ node, _ := lfstackUnpack(old)
+ next := atomicload64(&node.next)
+ if cas64(head, old, next) {
+ return unsafe.Pointer(node)
+ }
+ }
+}
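
lfstackpush and lfstackpop above are the usual lock-free (Treiber) stack compare-and-swap retry loops, with a push counter packed next to the pointer so that a recycled node cannot make a stale CAS succeed (the ABA hazard the deleted C file also guarded against). For orientation only, here is a standalone user-level sketch of the same retry pattern with ordinary pointers and sync/atomic; it is not runtime code, and it drops the counter on the assumption that its nodes are never recycled:

    // treiberdemo.go - standalone sketch of the CAS retry loop, not runtime code.
    package main

    import (
    	"fmt"
    	"sync/atomic"
    	"unsafe"
    )

    type node struct {
    	next unsafe.Pointer // *node
    	val  int
    }

    type stack struct {
    	head unsafe.Pointer // *node
    }

    // push links n in front of the current head, retrying until the CAS wins.
    func (s *stack) push(n *node) {
    	for {
    		old := atomic.LoadPointer(&s.head)
    		n.next = old
    		if atomic.CompareAndSwapPointer(&s.head, old, unsafe.Pointer(n)) {
    			return
    		}
    	}
    }

    // pop removes and returns the current head, or nil if the stack is empty.
    func (s *stack) pop() *node {
    	for {
    		old := atomic.LoadPointer(&s.head)
    		if old == nil {
    			return nil
    		}
    		n := (*node)(old)
    		next := atomic.LoadPointer(&n.next)
    		if atomic.CompareAndSwapPointer(&s.head, old, next) {
    			return n
    		}
    	}
    }

    func main() {
    	var s stack
    	s.push(&node{val: 1})
    	s.push(&node{val: 2})
    	fmt.Println(s.pop().val, s.pop().val) // 2 1
    }
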
diff --git a/src/runtime/lfstack_32bit.go b/src/runtime/lfstack_32bit.go
new file mode 100644
index 000000000..61d8678d9
--- /dev/null
+++ b/src/runtime/lfstack_32bit.go
@@ -0,0 +1,21 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 arm
+
+package runtime
+
+import "unsafe"
+
+// On 32-bit systems, the stored uint64 has a 32-bit pointer and 32-bit count.
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ return uint64(uintptr(unsafe.Pointer(node)))<<32 | uint64(cnt)
+}
+
+func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
+ node = (*lfnode)(unsafe.Pointer(uintptr(val >> 32)))
+ cnt = uintptr(val)
+ return
+}
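
A corresponding arithmetic-only check of the 32-bit layout above (synthetic values, not runtime code): the pointer occupies the high word and the count the low word, so the count can run through all 32 bits without disturbing the pointer.

    // pack32demo.go - standalone sketch of the 32-bit packing above.
    package main

    import "fmt"

    func pack(addr, cnt uint32) uint64 {
    	return uint64(addr)<<32 | uint64(cnt)
    }

    func main() {
    	val := pack(0x10203040, 0xffffffff) // count at its maximum
    	addr := uint32(val >> 32)
    	cnt := uint32(val)
    	fmt.Printf("%#x %#x\n", addr, cnt) // 0x10203040 0xffffffff
    }
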
diff --git a/src/runtime/lfstack_amd64.go b/src/runtime/lfstack_amd64.go
new file mode 100644
index 000000000..84e28519f
--- /dev/null
+++ b/src/runtime/lfstack_amd64.go
@@ -0,0 +1,24 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// On AMD64, virtual addresses are 48-bit numbers sign extended to 64.
+// We shift the address left 16 to eliminate the sign extended part and make
+// room in the bottom for the count.
+// In addition to the 16 bits taken from the top, we can take 3 from the
+// bottom, because node must be pointer-aligned, giving a total of 19 bits
+// of count.
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ return uint64(uintptr(unsafe.Pointer(node)))<<16 | uint64(cnt&(1<<19-1))
+}
+
+func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
+ node = (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> 19 << 3)))
+ cnt = uintptr(val & (1<<19 - 1))
+ return
+}
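
The comment above implies the packing is reversible for any canonical, 8-byte-aligned amd64 address. A quick arithmetic-only sketch (synthetic numbers, not runtime code) mirrors lfstackPack/lfstackUnpack and recovers both fields:

    // packdemo.go - arithmetic-only illustration of the amd64 packing above.
    package main

    import "fmt"

    func pack(addr, cnt uint64) uint64 {
    	return addr<<16 | cnt&(1<<19-1)
    }

    func unpack(val uint64) (addr, cnt uint64) {
    	// The arithmetic shift restores the sign-extended top 16 bits; the
    	// low 3 bits come back as zero because nodes are 8-byte aligned.
    	addr = uint64(int64(val) >> 19 << 3)
    	cnt = val & (1<<19 - 1)
    	return
    }

    func main() {
    	const addr = 0x00c000123450 // made-up 8-byte-aligned 48-bit user address
    	val := pack(addr, 7)
    	a, c := unpack(val)
    	fmt.Printf("%#x %d\n", a, c) // 0xc000123450 7
    }
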
diff --git a/src/runtime/lfstack_linux_power64x.go b/src/runtime/lfstack_linux_power64x.go
new file mode 100644
index 000000000..7a122bf92
--- /dev/null
+++ b/src/runtime/lfstack_linux_power64x.go
@@ -0,0 +1,26 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build power64 power64le
+// +build linux
+
+package runtime
+
+import "unsafe"
+
+// On Power64, Linux limits the user address space to 43 bits.
+// (https://www.kernel.org/doc/ols/2001/ppc64.pdf)
+// In addition to the 21 bits taken from the top, we can take 3 from the
+// bottom, because node must be pointer-aligned, giving a total of 24 bits
+// of count.
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ return uint64(uintptr(unsafe.Pointer(node)))<<21 | uint64(cnt&(1<<24-1))
+}
+
+func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
+ node = (*lfnode)(unsafe.Pointer(uintptr(val >> 24 << 3)))
+ cnt = uintptr(val & (1<<24 - 1))
+ return
+}
diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go
index 725962341..11c3a3f06 100644
--- a/src/runtime/lock_futex.go
+++ b/src/runtime/lock_futex.go
@@ -34,9 +34,6 @@ const (
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
-func futexsleep(addr *uint32, val uint32, ns int64)
-func futexwakeup(addr *uint32, cnt uint32)
-
// We use the uintptr mutex.key and note.key as a uint32.
func key32(p *uintptr) *uint32 {
return (*uint32)(unsafe.Pointer(p))
@@ -198,8 +195,8 @@ func notetsleepg(n *note, ns int64) bool {
gothrow("notetsleepg on g0")
}
- entersyscallblock()
+ entersyscallblock(0)
ok := notetsleep_internal(n, ns)
- exitsyscall()
+ exitsyscall(0)
return ok
}
diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go
index d136b8280..a2a87bac4 100644
--- a/src/runtime/lock_sema.go
+++ b/src/runtime/lock_sema.go
@@ -31,10 +31,6 @@ const (
passive_spin = 1
)
-func semacreate() uintptr
-func semasleep(int64) int32
-func semawakeup(mp *m)
-
func lock(l *mutex) {
gp := getg()
if gp.m.locks < 0 {
@@ -263,8 +259,8 @@ func notetsleepg(n *note, ns int64) bool {
if gp.m.waitsema == 0 {
gp.m.waitsema = semacreate()
}
- entersyscallblock()
+ entersyscallblock(0)
ok := notetsleep_internal(n, ns, nil, 0)
- exitsyscall()
+ exitsyscall(0)
return ok
}
diff --git a/src/runtime/malloc.c b/src/runtime/malloc.c
deleted file mode 100644
index b79c30b72..000000000
--- a/src/runtime/malloc.c
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// See malloc.h for overview.
-//
-// TODO(rsc): double-check stats.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "type.h"
-#include "typekind.h"
-#include "race.h"
-#include "stack.h"
-#include "textflag.h"
-
-// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
-#pragma dataflag NOPTR
-MHeap runtime·mheap;
-#pragma dataflag NOPTR
-MStats runtime·memstats;
-
-int32
-runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
-{
- uintptr n, i;
- byte *p;
- MSpan *s;
-
- g->m->mcache->local_nlookup++;
- if (sizeof(void*) == 4 && g->m->mcache->local_nlookup >= (1<<30)) {
- // purge cache stats to prevent overflow
- runtime·lock(&runtime·mheap.lock);
- runtime·purgecachedstats(g->m->mcache);
- runtime·unlock(&runtime·mheap.lock);
- }
-
- s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
- if(sp)
- *sp = s;
- if(s == nil) {
- if(base)
- *base = nil;
- if(size)
- *size = 0;
- return 0;
- }
-
- p = (byte*)((uintptr)s->start<<PageShift);
- if(s->sizeclass == 0) {
- // Large object.
- if(base)
- *base = p;
- if(size)
- *size = s->npages<<PageShift;
- return 1;
- }
-
- n = s->elemsize;
- if(base) {
- i = ((byte*)v - p)/n;
- *base = p + i*n;
- }
- if(size)
- *size = n;
-
- return 1;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·purgecachedstats(MCache *c)
-{
- MHeap *h;
- int32 i;
-
- // Protected by either heap or GC lock.
- h = &runtime·mheap;
- mstats.heap_alloc += c->local_cachealloc;
- c->local_cachealloc = 0;
- mstats.tinyallocs += c->local_tinyallocs;
- c->local_tinyallocs = 0;
- mstats.nlookup += c->local_nlookup;
- c->local_nlookup = 0;
- h->largefree += c->local_largefree;
- c->local_largefree = 0;
- h->nlargefree += c->local_nlargefree;
- c->local_nlargefree = 0;
- for(i=0; i<nelem(c->local_nsmallfree); i++) {
- h->nsmallfree[i] += c->local_nsmallfree[i];
- c->local_nsmallfree[i] = 0;
- }
-}
-
-// Size of the trailing by_size array differs between Go and C,
-// and all data after by_size is local to C, not exported to Go.
-// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
-// sizeof_C_MStats is what C thinks about size of Go struct.
-uintptr runtime·sizeof_C_MStats = offsetof(MStats, by_size[61]);
-
-#define MaxArena32 (2U<<30)
-
-// For use by Go. If it were a C enum it would be made available automatically,
-// but the value of MaxMem is too large for enum.
-uintptr runtime·maxmem = MaxMem;
-
-void
-runtime·mallocinit(void)
-{
- byte *p, *p1;
- uintptr arena_size, bitmap_size, spans_size, p_size;
- extern byte runtime·end[];
- uintptr limit;
- uint64 i;
- bool reserved;
-
- p = nil;
- p_size = 0;
- arena_size = 0;
- bitmap_size = 0;
- spans_size = 0;
- reserved = false;
-
- // for 64-bit build
- USED(p);
- USED(p_size);
- USED(arena_size);
- USED(bitmap_size);
- USED(spans_size);
-
- runtime·InitSizes();
-
- if(runtime·class_to_size[TinySizeClass] != TinySize)
- runtime·throw("bad TinySizeClass");
-
- // limit = runtime·memlimit();
- // See https://code.google.com/p/go/issues/detail?id=5049
- // TODO(rsc): Fix after 1.1.
- limit = 0;
-
- // Set up the allocation arena, a contiguous area of memory where
- // allocated data will be found. The arena begins with a bitmap large
- // enough to hold 4 bits per allocated word.
- if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
- // On a 64-bit machine, allocate from a single contiguous reservation.
- // 128 GB (MaxMem) should be big enough for now.
- //
- // The code will work with the reservation at any address, but ask
- // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
- // Allocating a 128 GB region takes away 37 bits, and the amd64
- // doesn't let us choose the top 17 bits, so that leaves the 11 bits
- // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
- // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
- // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
- // UTF-8 sequences, and they are otherwise as far away from
- // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
- // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
- // on OS X during thread allocations. 0x00c0 causes conflicts with
- // AddressSanitizer which reserves all memory up to 0x0100.
- // These choices are both for debuggability and to reduce the
- // odds of the conservative garbage collector not collecting memory
- // because some non-pointer block of memory had a bit pattern
- // that matched a memory address.
- //
- // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
- // but it hardly matters: e0 00 is not valid UTF-8 either.
- //
- // If this fails we fall back to the 32 bit memory mechanism
- arena_size = MaxMem;
- bitmap_size = arena_size / (sizeof(void*)*8/4);
- spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
- spans_size = ROUND(spans_size, PageSize);
- for(i = 0; i <= 0x7f; i++) {
- p = (void*)(i<<40 | 0x00c0ULL<<32);
- p_size = bitmap_size + spans_size + arena_size + PageSize;
- p = runtime·SysReserve(p, p_size, &reserved);
- if(p != nil)
- break;
- }
- }
- if (p == nil) {
- // On a 32-bit machine, we can't typically get away
- // with a giant virtual address space reservation.
- // Instead we map the memory information bitmap
- // immediately after the data segment, large enough
- // to handle another 2GB of mappings (256 MB),
- // along with a reservation for another 512 MB of memory.
- // When that gets used up, we'll start asking the kernel
- // for any memory anywhere and hope it's in the 2GB
- // following the bitmap (presumably the executable begins
- // near the bottom of memory, so we'll have to use up
- // most of memory before the kernel resorts to giving out
- // memory before the beginning of the text segment).
- //
- // Alternatively we could reserve 512 MB bitmap, enough
- // for 4GB of mappings, and then accept any memory the
- // kernel threw at us, but normally that's a waste of 512 MB
- // of address space, which is probably too much in a 32-bit world.
- bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
- arena_size = 512<<20;
- spans_size = MaxArena32 / PageSize * sizeof(runtime·mheap.spans[0]);
- if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
- bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
- arena_size = bitmap_size * 8;
- spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
- }
- spans_size = ROUND(spans_size, PageSize);
-
- // SysReserve treats the address we ask for, end, as a hint,
- // not as an absolute requirement. If we ask for the end
- // of the data segment but the operating system requires
- // a little more space before we can start allocating, it will
- // give out a slightly higher pointer. Except QEMU, which
- // is buggy, as usual: it won't adjust the pointer upward.
- // So adjust it upward a little bit ourselves: 1/4 MB to get
- // away from the running binary image and then round up
- // to a MB boundary.
- p = (byte*)ROUND((uintptr)runtime·end + (1<<18), 1<<20);
- p_size = bitmap_size + spans_size + arena_size + PageSize;
- p = runtime·SysReserve(p, p_size, &reserved);
- if(p == nil)
- runtime·throw("runtime: cannot reserve arena virtual address space");
- }
-
- // PageSize can be larger than OS definition of page size,
- // so SysReserve can give us a PageSize-unaligned pointer.
- // To overcome this we ask for PageSize more and round up the pointer.
- p1 = (byte*)ROUND((uintptr)p, PageSize);
-
- runtime·mheap.spans = (MSpan**)p1;
- runtime·mheap.bitmap = p1 + spans_size;
- runtime·mheap.arena_start = p1 + spans_size + bitmap_size;
- runtime·mheap.arena_used = runtime·mheap.arena_start;
- runtime·mheap.arena_end = p + p_size;
- runtime·mheap.arena_reserved = reserved;
-
- if(((uintptr)runtime·mheap.arena_start & (PageSize-1)) != 0)
- runtime·throw("misrounded allocation in mallocinit");
-
- // Initialize the rest of the allocator.
- runtime·MHeap_Init(&runtime·mheap);
- g->m->mcache = runtime·allocmcache();
-}
-
-void*
-runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
-{
- byte *p, *p_end;
- uintptr p_size;
- bool reserved;
-
- if(n > h->arena_end - h->arena_used) {
- // We are in 32-bit mode, maybe we didn't use all possible address space yet.
- // Reserve some more space.
- byte *new_end;
-
- p_size = ROUND(n + PageSize, 256<<20);
- new_end = h->arena_end + p_size;
- if(new_end <= h->arena_start + MaxArena32) {
- // TODO: It would be bad if part of the arena
- // is reserved and part is not.
- p = runtime·SysReserve(h->arena_end, p_size, &reserved);
- if(p == h->arena_end) {
- h->arena_end = new_end;
- h->arena_reserved = reserved;
- }
- else if(p+p_size <= h->arena_start + MaxArena32) {
- // Keep everything page-aligned.
- // Our pages are bigger than hardware pages.
- h->arena_end = p+p_size;
- h->arena_used = p + (-(uintptr)p&(PageSize-1));
- h->arena_reserved = reserved;
- } else {
- uint64 stat;
- stat = 0;
- runtime·SysFree(p, p_size, &stat);
- }
- }
- }
- if(n <= h->arena_end - h->arena_used) {
- // Keep taking from our reservation.
- p = h->arena_used;
- runtime·SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
- h->arena_used += n;
- runtime·MHeap_MapBits(h);
- runtime·MHeap_MapSpans(h);
- if(raceenabled)
- runtime·racemapshadow(p, n);
-
- if(((uintptr)p & (PageSize-1)) != 0)
- runtime·throw("misrounded allocation in MHeap_SysAlloc");
- return p;
- }
-
- // If using 64-bit, our reservation is all we have.
- if(h->arena_end - h->arena_start >= MaxArena32)
- return nil;
-
- // On 32-bit, once the reservation is gone we can
- // try to get memory at a location chosen by the OS
- // and hope that it is in the range we allocated bitmap for.
- p_size = ROUND(n, PageSize) + PageSize;
- p = runtime·sysAlloc(p_size, &mstats.heap_sys);
- if(p == nil)
- return nil;
-
- if(p < h->arena_start || p+p_size - h->arena_start >= MaxArena32) {
- runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
- p, h->arena_start, h->arena_start+MaxArena32);
- runtime·SysFree(p, p_size, &mstats.heap_sys);
- return nil;
- }
-
- p_end = p + p_size;
- p += -(uintptr)p & (PageSize-1);
- if(p+n > h->arena_used) {
- h->arena_used = p+n;
- if(p_end > h->arena_end)
- h->arena_end = p_end;
- runtime·MHeap_MapBits(h);
- runtime·MHeap_MapSpans(h);
- if(raceenabled)
- runtime·racemapshadow(p, n);
- }
-
- if(((uintptr)p & (PageSize-1)) != 0)
- runtime·throw("misrounded allocation in MHeap_SysAlloc");
- return p;
-}
-
-void
-runtime·setFinalizer_m(void)
-{
- FuncVal *fn;
- void *arg;
- uintptr nret;
- Type *fint;
- PtrType *ot;
-
- fn = g->m->ptrarg[0];
- arg = g->m->ptrarg[1];
- nret = g->m->scalararg[0];
- fint = g->m->ptrarg[2];
- ot = g->m->ptrarg[3];
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
- g->m->ptrarg[2] = nil;
- g->m->ptrarg[3] = nil;
-
- g->m->scalararg[0] = runtime·addfinalizer(arg, fn, nret, fint, ot);
-}
-
-void
-runtime·removeFinalizer_m(void)
-{
- void *p;
-
- p = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- runtime·removefinalizer(p);
-}
-
-// mcallable cache refill
-void
-runtime·mcacheRefill_m(void)
-{
- runtime·MCache_Refill(g->m->mcache, (int32)g->m->scalararg[0]);
-}
-
-void
-runtime·largeAlloc_m(void)
-{
- uintptr npages, size;
- MSpan *s;
- void *v;
- int32 flag;
-
- //runtime·printf("largeAlloc size=%D\n", g->m->scalararg[0]);
- // Allocate directly from heap.
- size = g->m->scalararg[0];
- flag = (int32)g->m->scalararg[1];
- if(size + PageSize < size)
- runtime·throw("out of memory");
- npages = size >> PageShift;
- if((size & PageMask) != 0)
- npages++;
- s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
- if(s == nil)
- runtime·throw("out of memory");
- s->limit = (byte*)(s->start<<PageShift) + size;
- v = (void*)(s->start << PageShift);
- // setup for mark sweep
- runtime·markspan(v, 0, 0, true);
- g->m->ptrarg[0] = s;
-}
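The long comment in mallocinit above fixes the 64-bit layout: a 128 GB arena, a heap bitmap of 4 bits per allocated word, a spans array of one pointer per page, all reserved at candidate addresses of the form 0x0000XXc000000000. The standalone program below only reproduces that arithmetic so the 8 GB bitmap figure and the first candidate addresses can be checked; it is a sketch, not runtime code.

// Standalone arithmetic check for the 64-bit arena reservation described
// in mallocinit: a 128 GB arena, a heap bitmap of 4 bits per 8-byte word,
// and candidate base addresses of the form 0x0000XXc000000000.
package main

import "fmt"

func main() {
	const (
		pageShift = 13
		pageSize  = 1 << pageShift
		ptrSize   = 8
		maxMem    = 128 << 30 // 128 GB arena on most 64-bit platforms
	)

	arenaSize := uint64(maxMem)
	bitmapSize := arenaSize / (ptrSize * 8 / 4) // 4 bits per word
	spansSize := arenaSize / pageSize * ptrSize // one pointer-sized spans entry per page

	fmt.Printf("arena  %d GB\n", arenaSize>>30)  // 128 GB
	fmt.Printf("bitmap %d GB\n", bitmapSize>>30) // 8 GB, matching the comment above
	fmt.Printf("spans  %d MB\n", spansSize>>20)  // 128 MB

	// First few candidate reservation addresses tried by mallocinit.
	for i := uint64(0); i < 3; i++ {
		p := i<<40 | 0x00c0<<32
		fmt.Printf("candidate %d: %#016x\n", i, p)
	}
}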
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index fab8cf269..f90a8f84a 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -26,10 +26,11 @@ const (
maxGCMask = _MaxGCMask
bitsDead = _BitsDead
bitsPointer = _BitsPointer
+ bitsScalar = _BitsScalar
mSpanInUse = _MSpanInUse
- concurrentSweep = _ConcurrentSweep != 0
+ concurrentSweep = _ConcurrentSweep
)
// Page number (address>>pageShift)
@@ -54,7 +55,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// This function must be atomic wrt GC, but for performance reasons
// we don't acquirem/releasem on fast path. The code below does not have
// split stack checks, so it can't be preempted by GC.
- // Functions like roundup/add are inlined. And onM/racemalloc are nosplit.
+ // Functions like roundup/add are inlined. And systemstack/racemalloc are nosplit.
// If debugMalloc = true, these assumptions are checked below.
if debugMalloc {
mp := acquirem()
@@ -140,10 +141,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[tinySizeClass]
v := s.freelist
if v == nil {
- mp := acquirem()
- mp.scalararg[0] = tinySizeClass
- onM(mcacheRefill_m)
- releasem(mp)
+ systemstack(func() {
+ mCache_Refill(c, tinySizeClass)
+ })
s = c.alloc[tinySizeClass]
v = s.freelist
}
@@ -171,10 +171,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[sizeclass]
v := s.freelist
if v == nil {
- mp := acquirem()
- mp.scalararg[0] = uintptr(sizeclass)
- onM(mcacheRefill_m)
- releasem(mp)
+ systemstack(func() {
+ mCache_Refill(c, int32(sizeclass))
+ })
s = c.alloc[sizeclass]
v = s.freelist
}
@@ -191,13 +190,10 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
}
c.local_cachealloc += intptr(size)
} else {
- mp := acquirem()
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(flags)
- onM(largeAlloc_m)
- s = (*mspan)(mp.ptrarg[0])
- mp.ptrarg[0] = nil
- releasem(mp)
+ var s *mspan
+ systemstack(func() {
+ s = largeAlloc(size, uint32(flags))
+ })
x = unsafe.Pointer(uintptr(s.start << pageShift))
size = uintptr(s.elemsize)
}
@@ -251,13 +247,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// into the GC bitmap. It's 7 times slower than copying
// from the pre-unrolled mask, but saves 1/16 of type size
// memory for the mask.
- mp := acquirem()
- mp.ptrarg[0] = x
- mp.ptrarg[1] = unsafe.Pointer(typ)
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(size0)
- onM(unrollgcproginplace_m)
- releasem(mp)
+ systemstack(func() {
+ unrollgcproginplace_m(x, typ, size, size0)
+ })
goto marked
}
ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
@@ -265,10 +257,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// by checking if the unroll flag byte is set
maskword := uintptr(atomicloadp(unsafe.Pointer(ptrmask)))
if *(*uint8)(unsafe.Pointer(&maskword)) == 0 {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(typ)
- onM(unrollgcprog_m)
- releasem(mp)
+ systemstack(func() {
+ unrollgcprog_m(typ)
+ })
}
ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
} else {
@@ -312,10 +303,9 @@ marked:
// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
if gcphase == _GCmarktermination {
- mp := acquirem()
- mp.ptrarg[0] = x
- onM(gcmarknewobject_m)
- releasem(mp)
+ systemstack(func() {
+ gcmarknewobject_m(uintptr(x))
+ })
}
if raceenabled {
@@ -377,10 +367,9 @@ func loadPtrMask(typ *_type) []uint8 {
// by checking if the unroll flag byte is set
maskword := uintptr(atomicloadp(unsafe.Pointer(ptrmask)))
if *(*uint8)(unsafe.Pointer(&maskword)) == 0 {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(typ)
- onM(unrollgcprog_m)
- releasem(mp)
+ systemstack(func() {
+ unrollgcprog_m(typ)
+ })
}
ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
} else {
@@ -404,7 +393,7 @@ func newarray(typ *_type, n uintptr) unsafe.Pointer {
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
- if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) {
+ if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
panic("runtime: allocation size out of range")
}
return mallocgc(uintptr(typ.size)*n, typ, flags)
@@ -484,19 +473,20 @@ func gogc(force int32) {
mp.gcing = 1
releasem(mp)
- onM(stoptheworld)
- onM(finishsweep_m) // finish sweep before we start concurrent scan.
- if false { // To turn on concurrent scan and mark set to true...
- onM(starttheworld)
+ systemstack(stoptheworld)
+ systemstack(finishsweep_m) // finish sweep before we start concurrent scan.
+ if false { // To turn on concurrent scan and mark set to true...
+ systemstack(starttheworld)
// Do a concurrent heap scan before we stop the world.
- onM(gcscan_m)
- onM(stoptheworld)
- onM(gcinstallmarkwb_m)
- onM(starttheworld)
- onM(gcmark_m)
- onM(stoptheworld)
- onM(gcinstalloffwb_m)
+ systemstack(gcscan_m)
+ systemstack(stoptheworld)
+ systemstack(gcinstallmarkwb_m)
+ systemstack(starttheworld)
+ systemstack(gcmark_m)
+ systemstack(stoptheworld)
+ systemstack(gcinstalloffwb_m)
}
+
if mp != acquirem() {
gothrow("gogc: rescheduled")
}
@@ -512,27 +502,25 @@ func gogc(force int32) {
if debug.gctrace > 1 {
n = 2
}
+ eagersweep := force >= 2
for i := 0; i < n; i++ {
if i > 0 {
startTime = nanotime()
}
// switch to g0, call gc, then switch back
- mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
- mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits
- if force >= 2 {
- mp.scalararg[2] = 1 // eagersweep
- } else {
- mp.scalararg[2] = 0
- }
- onM(gc_m)
+ systemstack(func() {
+ gc_m(startTime, eagersweep)
+ })
}
- onM(gccheckmark_m)
+ systemstack(func() {
+ gccheckmark_m(startTime, eagersweep)
+ })
// all done
mp.gcing = 0
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
releasem(mp)
mp = nil
@@ -544,11 +532,11 @@ func gogc(force int32) {
}
func GCcheckmarkenable() {
- onM(gccheckmarkenable_m)
+ systemstack(gccheckmarkenable_m)
}
func GCcheckmarkdisable() {
- onM(gccheckmarkdisable_m)
+ systemstack(gccheckmarkdisable_m)
}
// GC runs a garbage collection.
@@ -652,11 +640,10 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
f := (*eface)(unsafe.Pointer(&finalizer))
ftyp := f._type
if ftyp == nil {
- // switch to M stack and remove finalizer
- mp := acquirem()
- mp.ptrarg[0] = e.data
- onM(removeFinalizer_m)
- releasem(mp)
+ // switch to system stack and remove finalizer
+ systemstack(func() {
+ removefinalizer(e.data)
+ })
return
}
@@ -701,18 +688,11 @@ okarg:
// make sure we have a finalizer goroutine
createfing()
- // switch to M stack to add finalizer record
- mp := acquirem()
- mp.ptrarg[0] = f.data
- mp.ptrarg[1] = e.data
- mp.scalararg[0] = nret
- mp.ptrarg[2] = unsafe.Pointer(fint)
- mp.ptrarg[3] = unsafe.Pointer(ot)
- onM(setFinalizer_m)
- if mp.scalararg[0] != 1 {
- gothrow("runtime.SetFinalizer: finalizer already set")
- }
- releasem(mp)
+ systemstack(func() {
+ if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
+ gothrow("runtime.SetFinalizer: finalizer already set")
+ }
+ })
}
// round n up to a multiple of a. a must be a power of 2.
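The malloc.go hunks above all make the same change: the old onM(fn_m) calls, which smuggled arguments through the numbered m.ptrarg/m.scalararg slots, become systemstack calls with a closure that captures its arguments directly. The following standalone sketch (hypothetical names, not runtime code) contrasts the two shapes, showing why the closure form needs no slot bookkeeping and no pointer clearing afterwards.

// Illustration of the calling-convention change in this CL: passing
// arguments through numbered slots on a shared struct (the old
// onM + m.ptrarg/m.scalararg style) versus capturing them in a closure
// (the new systemstack style). Hypothetical names; not runtime code.
package main

import "fmt"

type worker struct {
	ptrarg    [4]interface{}
	scalararg [4]uintptr
}

// runOnWorker stands in for onM/systemstack: here it simply calls f.
func runOnWorker(f func()) { f() }

var w worker

// Old style: the caller loads slots, the callee unloads them, and both
// must agree on the slot layout and remember to clear pointers afterwards.
func largeAllocOld(size uintptr, flags uint32) string {
	w.scalararg[0] = size
	w.scalararg[1] = uintptr(flags)
	runOnWorker(largeAlloc_m)
	s := w.ptrarg[0].(string)
	w.ptrarg[0] = nil
	return s
}

func largeAlloc_m() {
	size := w.scalararg[0]
	flags := uint32(w.scalararg[1])
	w.ptrarg[0] = fmt.Sprintf("span(size=%d, flags=%d)", size, flags)
}

// New style: the closure captures size and flags and writes the result
// straight into the caller's local variable.
func largeAllocNew(size uintptr, flags uint32) string {
	var s string
	runOnWorker(func() {
		s = fmt.Sprintf("span(size=%d, flags=%d)", size, flags)
	})
	return s
}

func main() {
	fmt.Println(largeAllocOld(4096, 1))
	fmt.Println(largeAllocNew(4096, 1))
}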
diff --git a/src/runtime/malloc.h b/src/runtime/malloc.h
deleted file mode 100644
index 522b11bba..000000000
--- a/src/runtime/malloc.h
+++ /dev/null
@@ -1,620 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Memory allocator, based on tcmalloc.
-// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
-
-// The main allocator works in runs of pages.
-// Small allocation sizes (up to and including 32 kB) are
-// rounded to one of about 100 size classes, each of which
-// has its own free list of objects of exactly that size.
-// Any free page of memory can be split into a set of objects
-// of one size class, which are then managed using free list
-// allocators.
-//
-// The allocator's data structures are:
-//
-// FixAlloc: a free-list allocator for fixed-size objects,
-// used to manage storage used by the allocator.
-// MHeap: the malloc heap, managed at page (4096-byte) granularity.
-// MSpan: a run of pages managed by the MHeap.
-// MCentral: a shared free list for a given size class.
-// MCache: a per-thread (in Go, per-P) cache for small objects.
-// MStats: allocation statistics.
-//
-// Allocating a small object proceeds up a hierarchy of caches:
-//
-// 1. Round the size up to one of the small size classes
-// and look in the corresponding MCache free list.
-// If the list is not empty, allocate an object from it.
-// This can all be done without acquiring a lock.
-//
-// 2. If the MCache free list is empty, replenish it by
-// taking a bunch of objects from the MCentral free list.
-// Moving a bunch amortizes the cost of acquiring the MCentral lock.
-//
-// 3. If the MCentral free list is empty, replenish it by
-// allocating a run of pages from the MHeap and then
-// chopping that memory into objects of the given size.
-// Allocating many objects amortizes the cost of locking
-// the heap.
-//
-// 4. If the MHeap is empty or has no page runs large enough,
-// allocate a new group of pages (at least 1MB) from the
-// operating system. Allocating a large run of pages
-// amortizes the cost of talking to the operating system.
-//
-// Freeing a small object proceeds up the same hierarchy:
-//
-// 1. Look up the size class for the object and add it to
-// the MCache free list.
-//
-// 2. If the MCache free list is too long or the MCache has
-// too much memory, return some to the MCentral free lists.
-//
-// 3. If all the objects in a given span have returned to
-// the MCentral list, return that span to the page heap.
-//
-// 4. If the heap has too much memory, return some to the
-// operating system.
-//
-// TODO(rsc): Step 4 is not implemented.
-//
-// Allocating and freeing a large object uses the page heap
-// directly, bypassing the MCache and MCentral free lists.
-//
-// The small objects on the MCache and MCentral free lists
-// may or may not be zeroed. They are zeroed if and only if
-// the second word of the object is zero. A span in the
-// page heap is zeroed unless s->needzero is set. When a span
-// is allocated to break into small objects, it is zeroed if needed
-// and s->needzero is set. There are two main benefits to delaying the
-// zeroing this way:
-//
-// 1. stack frames allocated from the small object lists
-// or the page heap can avoid zeroing altogether.
-// 2. the cost of zeroing when reusing a small object is
-// charged to the mutator, not the garbage collector.
-//
-// This C code was written with an eye toward translating to Go
-// in the future. Methods have the form Type_Method(Type *t, ...).
-
-typedef struct MCentral MCentral;
-typedef struct MHeap MHeap;
-typedef struct MSpan MSpan;
-typedef struct MStats MStats;
-typedef struct MLink MLink;
-typedef struct GCStats GCStats;
-typedef struct Workbuf Workbuf;
-
-enum
-{
- PageShift = 13,
- PageSize = 1<<PageShift,
- PageMask = PageSize - 1,
-};
-typedef uintptr pageID; // address >> PageShift
-
-enum
-{
- // Computed constant. The definition of MaxSmallSize and the
- // algorithm in msize.c produce some number of different allocation
- // size classes. NumSizeClasses is that number. It's needed here
- // because there are static arrays of this length; when msize runs its
- // size choosing algorithm it double-checks that NumSizeClasses agrees.
- NumSizeClasses = 67,
-
- // Tunable constants.
- MaxSmallSize = 32<<10,
-
- // Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
- TinySize = 16,
- TinySizeClass = 2,
-
- FixAllocChunk = 16<<10, // Chunk size for FixAlloc
- MaxMHeapList = 1<<(20 - PageShift), // Maximum page length for fixed-size list in MHeap.
- HeapAllocChunk = 1<<20, // Chunk size for heap growth
-
- // Per-P, per order stack segment cache size.
- StackCacheSize = 32*1024,
- // Number of orders that get caching. Order 0 is FixedStack
- // and each successive order is twice as large.
- NumStackOrders = 3,
-
- // Number of bits in page to span calculations (4k pages).
- // On Windows 64-bit we limit the arena to 32GB or 35 bits (see below for reason).
- // On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
- // On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
-#ifdef _64BIT
-#ifdef GOOS_windows
- // Windows counts memory used by page table into committed memory
- // of the process, so we can't reserve too much memory.
- // See http://golang.org/issue/5402 and http://golang.org/issue/5236.
- MHeapMap_Bits = 35 - PageShift,
-#else
- MHeapMap_Bits = 37 - PageShift,
-#endif
-#else
- MHeapMap_Bits = 32 - PageShift,
-#endif
-
- // Max number of threads to run garbage collection.
- // 2, 3, and 4 are all plausible maximums depending
- // on the hardware details of the machine. The garbage
- // collector scales well to 32 cpus.
- MaxGcproc = 32,
-};
-
-// Maximum memory allocation size, a hint for callers.
-// This must be a #define instead of an enum because it
-// is so large.
-#ifdef _64BIT
-#define MaxMem (1ULL<<(MHeapMap_Bits+PageShift)) /* 128 GB or 32 GB */
-#else
-#define MaxMem ((uintptr)-1)
-#endif
-
-// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
-struct MLink
-{
- MLink *next;
-};
-
-// sysAlloc obtains a large chunk of zeroed memory from the
-// operating system, typically on the order of a hundred kilobytes
-// or a megabyte.
-// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
-// may use larger alignment, so the caller must be careful to realign the
-// memory obtained by sysAlloc.
-//
-// SysUnused notifies the operating system that the contents
-// of the memory region are no longer needed and can be reused
-// for other purposes.
-// SysUsed notifies the operating system that the contents
-// of the memory region are needed again.
-//
-// SysFree returns it unconditionally; this is only used if
-// an out-of-memory error has been detected midway through
-// an allocation. It is okay if SysFree is a no-op.
-//
-// SysReserve reserves address space without allocating memory.
-// If the pointer passed to it is non-nil, the caller wants the
-// reservation there, but SysReserve can still choose another
-// location if that one is unavailable. On some systems and in some
-// cases SysReserve will simply check that the address space is
-// available and not actually reserve it. If SysReserve returns
-// non-nil, it sets *reserved to true if the address space is
-// reserved, false if it has merely been checked.
-// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
-// may use larger alignment, so the caller must be careful to realign the
-// memory obtained by sysAlloc.
-//
-// SysMap maps previously reserved address space for use.
-// The reserved argument is true if the address space was really
-// reserved, not merely checked.
-//
-// SysFault marks a (already sysAlloc'd) region to fault
-// if accessed. Used only for debugging the runtime.
-
-void* runtime·sysAlloc(uintptr nbytes, uint64 *stat);
-void runtime·SysFree(void *v, uintptr nbytes, uint64 *stat);
-void runtime·SysUnused(void *v, uintptr nbytes);
-void runtime·SysUsed(void *v, uintptr nbytes);
-void runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
-void* runtime·SysReserve(void *v, uintptr nbytes, bool *reserved);
-void runtime·SysFault(void *v, uintptr nbytes);
-
-// FixAlloc is a simple free-list allocator for fixed size objects.
-// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
-// MCache and MSpan objects.
-//
-// Memory returned by FixAlloc_Alloc is not zeroed.
-// The caller is responsible for locking around FixAlloc calls.
-// Callers can keep state in the object but the first word is
-// smashed by freeing and reallocating.
-struct FixAlloc
-{
- uintptr size;
- void (*first)(void *arg, byte *p); // called first time p is returned
- void* arg;
- MLink* list;
- byte* chunk;
- uint32 nchunk;
- uintptr inuse; // in-use bytes now
- uint64* stat;
-};
-
-void runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
-void* runtime·FixAlloc_Alloc(FixAlloc *f);
-void runtime·FixAlloc_Free(FixAlloc *f, void *p);
-
-
-// Statistics.
-// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
-struct MStats
-{
- // General statistics.
- uint64 alloc; // bytes allocated and still in use
- uint64 total_alloc; // bytes allocated (even if freed)
- uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
- uint64 nlookup; // number of pointer lookups
- uint64 nmalloc; // number of mallocs
- uint64 nfree; // number of frees
-
- // Statistics about malloc heap.
- // protected by mheap.lock
- uint64 heap_alloc; // bytes allocated and still in use
- uint64 heap_sys; // bytes obtained from system
- uint64 heap_idle; // bytes in idle spans
- uint64 heap_inuse; // bytes in non-idle spans
- uint64 heap_released; // bytes released to the OS
- uint64 heap_objects; // total number of allocated objects
-
- // Statistics about allocation of low-level fixed-size structures.
- // Protected by FixAlloc locks.
- uint64 stacks_inuse; // this number is included in heap_inuse above
- uint64 stacks_sys; // always 0 in mstats
- uint64 mspan_inuse; // MSpan structures
- uint64 mspan_sys;
- uint64 mcache_inuse; // MCache structures
- uint64 mcache_sys;
- uint64 buckhash_sys; // profiling bucket hash table
- uint64 gc_sys;
- uint64 other_sys;
-
- // Statistics about garbage collector.
- // Protected by mheap or stopping the world during GC.
- uint64 next_gc; // next GC (in heap_alloc time)
- uint64 last_gc; // last GC (in absolute time)
- uint64 pause_total_ns;
- uint64 pause_ns[256]; // circular buffer of recent GC pause lengths
- uint64 pause_end[256]; // circular buffer of recent GC end times (nanoseconds since 1970)
- uint32 numgc;
- bool enablegc;
- bool debuggc;
-
- // Statistics about allocation size classes.
-
- struct MStatsBySize {
- uint32 size;
- uint64 nmalloc;
- uint64 nfree;
- } by_size[NumSizeClasses];
-
- uint64 tinyallocs; // number of tiny allocations that didn't cause actual allocation; not exported to Go directly
-};
-
-
-#define mstats runtime·memstats
-extern MStats mstats;
-void runtime·updatememstats(GCStats *stats);
-void runtime·ReadMemStats(MStats *stats);
-
-// Size classes. Computed and initialized by InitSizes.
-//
-// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
-// 1 <= sizeclass < NumSizeClasses, for n.
-// Size class 0 is reserved to mean "not small".
-//
-// class_to_size[i] = largest size in class i
-// class_to_allocnpages[i] = number of pages to allocate when
-// making new objects in class i
-
-int32 runtime·SizeToClass(int32);
-uintptr runtime·roundupsize(uintptr);
-extern int32 runtime·class_to_size[NumSizeClasses];
-extern int32 runtime·class_to_allocnpages[NumSizeClasses];
-extern int8 runtime·size_to_class8[1024/8 + 1];
-extern int8 runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1];
-extern void runtime·InitSizes(void);
-
-typedef struct MCacheList MCacheList;
-struct MCacheList
-{
- MLink *list;
- uint32 nlist;
-};
-
-typedef struct StackFreeList StackFreeList;
-struct StackFreeList
-{
- MLink *list; // linked list of free stacks
- uintptr size; // total size of stacks in list
-};
-
-typedef struct SudoG SudoG;
-
-// Per-thread (in Go, per-P) cache for small objects.
-// No locking needed because it is per-thread (per-P).
-struct MCache
-{
- // The following members are accessed on every malloc,
- // so they are grouped here for better caching.
- int32 next_sample; // trigger heap sample after allocating this many bytes
- intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
- // Allocator cache for tiny objects w/o pointers.
- // See "Tiny allocator" comment in malloc.goc.
- byte* tiny;
- uintptr tinysize;
- uintptr local_tinyallocs; // number of tiny allocs not counted in other stats
- // The rest is not accessed on every malloc.
- MSpan* alloc[NumSizeClasses]; // spans to allocate from
-
- StackFreeList stackcache[NumStackOrders];
-
- SudoG* sudogcache;
-
- // Local allocator stats, flushed during GC.
- uintptr local_nlookup; // number of pointer lookups
- uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
- uintptr local_nlargefree; // number of frees for large objects (>MaxSmallSize)
- uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
-};
-
-MSpan* runtime·MCache_Refill(MCache *c, int32 sizeclass);
-void runtime·MCache_ReleaseAll(MCache *c);
-void runtime·stackcache_clear(MCache *c);
-void runtime·gcworkbuffree(Workbuf *b);
-
-enum
-{
- KindSpecialFinalizer = 1,
- KindSpecialProfile = 2,
- // Note: The finalizer special must be first because if we're freeing
- // an object, a finalizer special will cause the freeing operation
- // to abort, and we want to keep the other special records around
- // if that happens.
-};
-
-typedef struct Special Special;
-struct Special
-{
- Special* next; // linked list in span
- uint16 offset; // span offset of object
- byte kind; // kind of Special
-};
-
-// The described object has a finalizer set for it.
-typedef struct SpecialFinalizer SpecialFinalizer;
-struct SpecialFinalizer
-{
- Special special;
- FuncVal* fn;
- uintptr nret;
- Type* fint;
- PtrType* ot;
-};
-
-// The described object is being heap profiled.
-typedef struct Bucket Bucket; // from mprof.h
-typedef struct SpecialProfile SpecialProfile;
-struct SpecialProfile
-{
- Special special;
- Bucket* b;
-};
-
-// An MSpan is a run of pages.
-enum
-{
- MSpanInUse = 0, // allocated for garbage collected heap
- MSpanStack, // allocated for use by stack allocator
- MSpanFree,
- MSpanListHead,
- MSpanDead,
-};
-struct MSpan
-{
- MSpan *next; // in a span linked list
- MSpan *prev; // in a span linked list
- pageID start; // starting page number
- uintptr npages; // number of pages in span
- MLink *freelist; // list of free objects
- // sweep generation:
- // if sweepgen == h->sweepgen - 2, the span needs sweeping
- // if sweepgen == h->sweepgen - 1, the span is currently being swept
- // if sweepgen == h->sweepgen, the span is swept and ready to use
- // h->sweepgen is incremented by 2 after every GC
- uint32 sweepgen;
- uint16 ref; // capacity - number of objects in freelist
- uint8 sizeclass; // size class
- bool incache; // being used by an MCache
- uint8 state; // MSpanInUse etc
- uint8 needzero; // needs to be zeroed before allocation
- uintptr elemsize; // computed from sizeclass or from npages
- int64 unusedsince; // First time spotted by GC in MSpanFree state
- uintptr npreleased; // number of pages released to the OS
- byte *limit; // end of data in span
- Mutex specialLock; // guards specials list
- Special *specials; // linked list of special records sorted by offset.
-};
-
-void runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages);
-void runtime·MSpan_EnsureSwept(MSpan *span);
-bool runtime·MSpan_Sweep(MSpan *span, bool preserve);
-
-// Every MSpan is in one doubly-linked list,
-// either one of the MHeap's free lists or one of the
-// MCentral's span lists. We use empty MSpan structures as list heads.
-void runtime·MSpanList_Init(MSpan *list);
-bool runtime·MSpanList_IsEmpty(MSpan *list);
-void runtime·MSpanList_Insert(MSpan *list, MSpan *span);
-void runtime·MSpanList_InsertBack(MSpan *list, MSpan *span);
-void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in
-
-
-// Central list of free objects of a given size.
-struct MCentral
-{
- Mutex lock;
- int32 sizeclass;
- MSpan nonempty; // list of spans with a free object
- MSpan empty; // list of spans with no free objects (or cached in an MCache)
-};
-
-void runtime·MCentral_Init(MCentral *c, int32 sizeclass);
-MSpan* runtime·MCentral_CacheSpan(MCentral *c);
-void runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s);
-bool runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end, bool preserve);
-
-// Main malloc heap.
-// The heap itself is the "free[]" and "large" arrays,
-// but all the other global data is here too.
-struct MHeap
-{
- Mutex lock;
- MSpan free[MaxMHeapList]; // free lists of given length
- MSpan freelarge; // free lists length >= MaxMHeapList
- MSpan busy[MaxMHeapList]; // busy lists of large objects of given length
- MSpan busylarge; // busy lists of large objects length >= MaxMHeapList
- MSpan **allspans; // all spans out there
- MSpan **gcspans; // copy of allspans referenced by GC marker or sweeper
- uint32 nspan;
- uint32 nspancap;
- uint32 sweepgen; // sweep generation, see comment in MSpan
- uint32 sweepdone; // all spans are swept
-
- // span lookup
- MSpan** spans;
- uintptr spans_mapped;
-
- // range of addresses we might see in the heap
- byte *bitmap;
- uintptr bitmap_mapped;
- byte *arena_start;
- byte *arena_used;
- byte *arena_end;
- bool arena_reserved;
-
- // central free lists for small size classes.
- // the padding makes sure that the MCentrals are
- // spaced CacheLineSize bytes apart, so that each MCentral.lock
- // gets its own cache line.
- struct MHeapCentral {
- MCentral mcentral;
- byte pad[CacheLineSize];
- } central[NumSizeClasses];
-
- FixAlloc spanalloc; // allocator for Span*
- FixAlloc cachealloc; // allocator for MCache*
- FixAlloc specialfinalizeralloc; // allocator for SpecialFinalizer*
- FixAlloc specialprofilealloc; // allocator for SpecialProfile*
- Mutex speciallock; // lock for special record allocators.
-
- // Malloc stats.
- uint64 largefree; // bytes freed for large objects (>MaxSmallSize)
- uint64 nlargefree; // number of frees for large objects (>MaxSmallSize)
- uint64 nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
-};
-#define runtime·mheap runtime·mheap_
-extern MHeap runtime·mheap;
-
-void runtime·MHeap_Init(MHeap *h);
-MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
-MSpan* runtime·MHeap_AllocStack(MHeap *h, uintptr npage);
-void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
-void runtime·MHeap_FreeStack(MHeap *h, MSpan *s);
-MSpan* runtime·MHeap_Lookup(MHeap *h, void *v);
-MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v);
-void* runtime·MHeap_SysAlloc(MHeap *h, uintptr n);
-void runtime·MHeap_MapBits(MHeap *h);
-void runtime·MHeap_MapSpans(MHeap *h);
-void runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit);
-
-void* runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
-int32 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
-uintptr runtime·sweepone(void);
-void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
-void runtime·unmarkspan(void *v, uintptr size);
-void runtime·purgecachedstats(MCache*);
-void runtime·tracealloc(void*, uintptr, Type*);
-void runtime·tracefree(void*, uintptr);
-void runtime·tracegc(void);
-
-int32 runtime·gcpercent;
-int32 runtime·readgogc(void);
-void runtime·clearpools(void);
-
-enum
-{
- // flags to malloc
- FlagNoScan = 1<<0, // GC doesn't have to scan object
- FlagNoZero = 1<<1, // don't zero memory
-};
-
-void runtime·mProf_Malloc(void*, uintptr);
-void runtime·mProf_Free(Bucket*, uintptr, bool);
-void runtime·mProf_GC(void);
-void runtime·iterate_memprof(void (**callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr));
-int32 runtime·gcprocs(void);
-void runtime·helpgc(int32 nproc);
-void runtime·gchelper(void);
-void runtime·createfing(void);
-G* runtime·wakefing(void);
-void runtime·getgcmask(byte*, Type*, byte**, uintptr*);
-
-// NOTE: Layout known to queuefinalizer.
-typedef struct Finalizer Finalizer;
-struct Finalizer
-{
- FuncVal *fn; // function to call
- void *arg; // ptr to object
- uintptr nret; // bytes of return values from fn
- Type *fint; // type of first argument of fn
- PtrType *ot; // type of ptr to object
-};
-
-typedef struct FinBlock FinBlock;
-struct FinBlock
-{
- FinBlock *alllink;
- FinBlock *next;
- int32 cnt;
- int32 cap;
- Finalizer fin[1];
-};
-extern Mutex runtime·finlock; // protects the following variables
-extern G* runtime·fing;
-extern bool runtime·fingwait;
-extern bool runtime·fingwake;
-extern FinBlock *runtime·finq; // list of finalizers that are to be executed
-extern FinBlock *runtime·finc; // cache of free blocks
-
-void runtime·setprofilebucket_m(void);
-
-bool runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*);
-void runtime·removefinalizer(void*);
-void runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot);
-bool runtime·freespecial(Special *s, void *p, uintptr size, bool freed);
-
-// Information from the compiler about the layout of stack frames.
-struct BitVector
-{
- int32 n; // # of bits
- uint8 *bytedata;
-};
-typedef struct StackMap StackMap;
-struct StackMap
-{
- int32 n; // number of bitmaps
- int32 nbit; // number of bits in each bitmap
- uint8 bytedata[]; // bitmaps, each starting on a 32-bit boundary
-};
-// Returns pointer map data for the given stackmap index
-// (the index is encoded in PCDATA_StackMapIndex).
-BitVector runtime·stackmapdata(StackMap *stackmap, int32 n);
-
-extern BitVector runtime·gcdatamask;
-extern BitVector runtime·gcbssmask;
-
-// defined in mgc0.go
-void runtime·gc_m_ptr(Eface*);
-void runtime·gc_g_ptr(Eface*);
-void runtime·gc_itab_ptr(Eface*);
-
-void runtime·setgcpercent_m(void);
-
-// Value we use to mark dead pointers when GODEBUG=gcdead=1.
-#define PoisonGC ((uintptr)0xf969696969696969ULL)
-#define PoisonStack ((uintptr)0x6868686868686868ULL)
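The overview comment in the deleted header (repeated in malloc2.go below) describes allocation as walking up a hierarchy: a per-P MCache free list, refilled in batches from a locked MCentral, which in turn grows from the MHeap. The toy sketch below shows just the first two levels of that idea: a per-worker cache that pops without locking and refills in batches from a mutex-protected central pool. Hypothetical names; a simplification, not the runtime allocator.

// Toy two-level free list in the spirit of MCache/MCentral: each worker
// owns a small cache it can pop from without locking, and refills it in
// batches from a shared central pool guarded by a mutex.
package main

import (
	"fmt"
	"sync"
)

type central struct {
	mu   sync.Mutex
	free []int // stand-in for a span's free objects
}

// refill moves up to n objects from the central pool into dst,
// amortizing the cost of taking the lock over many allocations.
func (c *central) refill(dst *[]int, n int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for len(*dst) < n && len(c.free) > 0 {
		last := len(c.free) - 1
		*dst = append(*dst, c.free[last])
		c.free = c.free[:last]
	}
}

type cache struct {
	objs []int
	src  *central
}

// alloc pops from the local cache; only when the cache is empty does it
// go to the central pool, mirroring steps 1 and 2 of the overview above.
func (m *cache) alloc() (int, bool) {
	if len(m.objs) == 0 {
		m.src.refill(&m.objs, 8)
		if len(m.objs) == 0 {
			return 0, false // central pool exhausted (would grow the heap)
		}
	}
	last := len(m.objs) - 1
	v := m.objs[last]
	m.objs = m.objs[:last]
	return v, true
}

func main() {
	c := &central{free: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}
	m := &cache{src: c}
	for i := 0; i < 12; i++ {
		v, ok := m.alloc()
		fmt.Println(v, ok)
	}
}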
diff --git a/src/runtime/malloc1.go b/src/runtime/malloc1.go
new file mode 100644
index 000000000..db02d9cca
--- /dev/null
+++ b/src/runtime/malloc1.go
@@ -0,0 +1,318 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See malloc.h for overview.
+//
+// TODO(rsc): double-check stats.
+
+package runtime
+
+import "unsafe"
+
+const _MaxArena32 = 2 << 30
+
+// For use by Go. If it were a C enum it would be made available automatically,
+// but the value of MaxMem is too large for enum.
+// XXX - uintptr runtime·maxmem = MaxMem;
+
+func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
+ _g_ := getg()
+
+ _g_.m.mcache.local_nlookup++
+ if ptrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
+ // purge cache stats to prevent overflow
+ lock(&mheap_.lock)
+ purgecachedstats(_g_.m.mcache)
+ unlock(&mheap_.lock)
+ }
+
+ s := mHeap_LookupMaybe(&mheap_, unsafe.Pointer(v))
+ if sp != nil {
+ *sp = s
+ }
+ if s == nil {
+ if base != nil {
+ *base = 0
+ }
+ if size != nil {
+ *size = 0
+ }
+ return 0
+ }
+
+ p := uintptr(s.start) << _PageShift
+ if s.sizeclass == 0 {
+ // Large object.
+ if base != nil {
+ *base = p
+ }
+ if size != nil {
+ *size = s.npages << _PageShift
+ }
+ return 1
+ }
+
+ n := s.elemsize
+ if base != nil {
+ i := (uintptr(v) - uintptr(p)) / n
+ *base = p + i*n
+ }
+ if size != nil {
+ *size = n
+ }
+
+ return 1
+}
+
+//go:nosplit
+func purgecachedstats(c *mcache) {
+ // Protected by either heap or GC lock.
+ h := &mheap_
+ memstats.heap_alloc += uint64(c.local_cachealloc)
+ c.local_cachealloc = 0
+ memstats.tinyallocs += uint64(c.local_tinyallocs)
+ c.local_tinyallocs = 0
+ memstats.nlookup += uint64(c.local_nlookup)
+ c.local_nlookup = 0
+ h.largefree += uint64(c.local_largefree)
+ c.local_largefree = 0
+ h.nlargefree += uint64(c.local_nlargefree)
+ c.local_nlargefree = 0
+ for i := 0; i < len(c.local_nsmallfree); i++ {
+ h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
+ c.local_nsmallfree[i] = 0
+ }
+}
+
+func mallocinit() {
+ initSizes()
+
+ if class_to_size[_TinySizeClass] != _TinySize {
+ gothrow("bad TinySizeClass")
+ }
+
+ var p, arena_size, bitmap_size, spans_size, p_size, limit uintptr
+ var reserved bool
+
+ // limit = runtime.memlimit();
+ // See https://code.google.com/p/go/issues/detail?id=5049
+ // TODO(rsc): Fix after 1.1.
+ limit = 0
+
+ // Set up the allocation arena, a contiguous area of memory where
+ // allocated data will be found. The arena begins with a bitmap large
+ // enough to hold 4 bits per allocated word.
+ if ptrSize == 8 && (limit == 0 || limit > 1<<30) {
+ // On a 64-bit machine, allocate from a single contiguous reservation.
+ // 128 GB (MaxMem) should be big enough for now.
+ //
+ // The code will work with the reservation at any address, but ask
+ // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
+ // Allocating a 128 GB region takes away 37 bits, and the amd64
+ // doesn't let us choose the top 17 bits, so that leaves the 11 bits
+ // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
+ // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
+ // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
+ // UTF-8 sequences, and they are otherwise as far away from
+ // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
+ // addresses. An earlier attempt to use 0x11f8 caused out of memory errors
+ // on OS X during thread allocations. 0x00c0 causes conflicts with
+ // AddressSanitizer which reserves all memory up to 0x0100.
+ // These choices are both for debuggability and to reduce the
+ // odds of the conservative garbage collector not collecting memory
+ // because some non-pointer block of memory had a bit pattern
+ // that matched a memory address.
+ //
+ // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
+ // but it hardly matters: e0 00 is not valid UTF-8 either.
+ //
+ // If this fails we fall back to the 32 bit memory mechanism
+ arena_size = round(_MaxMem, _PageSize)
+ bitmap_size = arena_size / (ptrSize * 8 / 4)
+ spans_size = arena_size / _PageSize * ptrSize
+ spans_size = round(spans_size, _PageSize)
+ for i := 0; i <= 0x7f; i++ {
+ p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
+ p_size = bitmap_size + spans_size + arena_size + _PageSize
+ p = uintptr(sysReserve(unsafe.Pointer(p), p_size, &reserved))
+ if p != 0 {
+ break
+ }
+ }
+ }
+
+ if p == 0 {
+ // On a 32-bit machine, we can't typically get away
+ // with a giant virtual address space reservation.
+ // Instead we map the memory information bitmap
+ // immediately after the data segment, large enough
+ // to handle another 2GB of mappings (256 MB),
+ // along with a reservation for another 512 MB of memory.
+ // When that gets used up, we'll start asking the kernel
+ // for any memory anywhere and hope it's in the 2GB
+ // following the bitmap (presumably the executable begins
+ // near the bottom of memory, so we'll have to use up
+ // most of memory before the kernel resorts to giving out
+ // memory before the beginning of the text segment).
+ //
+ // Alternatively we could reserve 512 MB bitmap, enough
+ // for 4GB of mappings, and then accept any memory the
+ // kernel threw at us, but normally that's a waste of 512 MB
+ // of address space, which is probably too much in a 32-bit world.
+ bitmap_size = _MaxArena32 / (ptrSize * 8 / 4)
+ arena_size = 512 << 20
+ spans_size = _MaxArena32 / _PageSize * ptrSize
+ if limit > 0 && arena_size+bitmap_size+spans_size > limit {
+ bitmap_size = (limit / 9) &^ ((1 << _PageShift) - 1)
+ arena_size = bitmap_size * 8
+ spans_size = arena_size / _PageSize * ptrSize
+ }
+ spans_size = round(spans_size, _PageSize)
+
+ // SysReserve treats the address we ask for, end, as a hint,
+ // not as an absolute requirement. If we ask for the end
+ // of the data segment but the operating system requires
+ // a little more space before we can start allocating, it will
+ // give out a slightly higher pointer. Except QEMU, which
+ // is buggy, as usual: it won't adjust the pointer upward.
+ // So adjust it upward a little bit ourselves: 1/4 MB to get
+ // away from the running binary image and then round up
+ // to a MB boundary.
+ p = round(uintptr(unsafe.Pointer(&end))+(1<<18), 1<<20)
+ p_size = bitmap_size + spans_size + arena_size + _PageSize
+ p = uintptr(sysReserve(unsafe.Pointer(p), p_size, &reserved))
+ if p == 0 {
+ gothrow("runtime: cannot reserve arena virtual address space")
+ }
+ }
+
+ // PageSize can be larger than OS definition of page size,
+ // so SysReserve can give us a PageSize-unaligned pointer.
+ // To overcome this we ask for PageSize more and round up the pointer.
+ p1 := round(p, _PageSize)
+
+ mheap_.spans = (**mspan)(unsafe.Pointer(p1))
+ mheap_.bitmap = p1 + spans_size
+ mheap_.arena_start = p1 + (spans_size + bitmap_size)
+ mheap_.arena_used = mheap_.arena_start
+ mheap_.arena_end = p + p_size
+ mheap_.arena_reserved = reserved
+
+ if mheap_.arena_start&(_PageSize-1) != 0 {
+ println("bad pagesize", hex(p), hex(p1), hex(spans_size), hex(bitmap_size), hex(_PageSize), "start", hex(mheap_.arena_start))
+ gothrow("misrounded allocation in mallocinit")
+ }
+
+ // Initialize the rest of the allocator.
+ mHeap_Init(&mheap_, spans_size)
+ _g_ := getg()
+ _g_.m.mcache = allocmcache()
+}
+
+func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
+ if n > uintptr(h.arena_end)-uintptr(h.arena_used) {
+ // We are in 32-bit mode, maybe we didn't use all possible address space yet.
+ // Reserve some more space.
+ p_size := round(n+_PageSize, 256<<20)
+ new_end := h.arena_end + p_size
+ if new_end <= h.arena_start+_MaxArena32 {
+ // TODO: It would be bad if part of the arena
+ // is reserved and part is not.
+ var reserved bool
+ p := uintptr(sysReserve((unsafe.Pointer)(h.arena_end), p_size, &reserved))
+ if p == h.arena_end {
+ h.arena_end = new_end
+ h.arena_reserved = reserved
+ } else if p+p_size <= h.arena_start+_MaxArena32 {
+ // Keep everything page-aligned.
+ // Our pages are bigger than hardware pages.
+ h.arena_end = p + p_size
+ h.arena_used = p + (-uintptr(p) & (_PageSize - 1))
+ h.arena_reserved = reserved
+ } else {
+ var stat uint64
+ sysFree((unsafe.Pointer)(p), p_size, &stat)
+ }
+ }
+ }
+
+ if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
+ // Keep taking from our reservation.
+ p := h.arena_used
+ sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
+ h.arena_used += n
+ mHeap_MapBits(h)
+ mHeap_MapSpans(h)
+ if raceenabled {
+ racemapshadow((unsafe.Pointer)(p), n)
+ }
+
+ if uintptr(p)&(_PageSize-1) != 0 {
+ gothrow("misrounded allocation in MHeap_SysAlloc")
+ }
+ return (unsafe.Pointer)(p)
+ }
+
+ // If using 64-bit, our reservation is all we have.
+ if uintptr(h.arena_end)-uintptr(h.arena_start) >= _MaxArena32 {
+ return nil
+ }
+
+ // On 32-bit, once the reservation is gone we can
+ // try to get memory at a location chosen by the OS
+ // and hope that it is in the range we allocated bitmap for.
+ p_size := round(n, _PageSize) + _PageSize
+ p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
+ if p == 0 {
+ return nil
+ }
+
+ if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
+ print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
+ sysFree((unsafe.Pointer)(p), p_size, &memstats.heap_sys)
+ return nil
+ }
+
+ p_end := p + p_size
+ p += -p & (_PageSize - 1)
+ if uintptr(p)+n > uintptr(h.arena_used) {
+ h.arena_used = p + n
+ if p_end > h.arena_end {
+ h.arena_end = p_end
+ }
+ mHeap_MapBits(h)
+ mHeap_MapSpans(h)
+ if raceenabled {
+ racemapshadow((unsafe.Pointer)(p), n)
+ }
+ }
+
+ if uintptr(p)&(_PageSize-1) != 0 {
+ gothrow("misrounded allocation in MHeap_SysAlloc")
+ }
+ return (unsafe.Pointer)(p)
+}
+
+var end struct{}
+
+func largeAlloc(size uintptr, flag uint32) *mspan {
+ // print("largeAlloc size=", size, "\n")
+
+ if size+_PageSize < size {
+ gothrow("out of memory")
+ }
+ npages := size >> _PageShift
+ if size&_PageMask != 0 {
+ npages++
+ }
+ s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
+ if s == nil {
+ gothrow("out of memory")
+ }
+ s.limit = uintptr(s.start)<<_PageShift + size
+ v := unsafe.Pointer(uintptr(s.start) << _PageShift)
+ // setup for mark sweep
+ markspan(v, 0, 0, true)
+ return s
+}
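malloc1.go leans on two small alignment idioms: round(n, a), which rounds a size up to a power-of-two multiple as (n+a-1) &^ (a-1), and p += -p & (_PageSize-1), which bumps an address up to the next page boundary. The check below shows the two forms agree on page alignment; since round is an unexported runtime helper, a local copy named roundUp stands in for it here.

// Check of the two alignment idioms used throughout malloc1.go:
// rounding a size up to a multiple of a power of two, and bumping an
// address up to the next page boundary with p += -p & (pageSize-1).
// roundUp is a local stand-in for the unexported runtime helper round().
package main

import "fmt"

const pageSize = 8192 // _PageSize for 13-bit pages

// roundUp rounds n up to a multiple of a; a must be a power of 2.
func roundUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	for _, n := range []uintptr{0, 1, 8191, 8192, 8193, 100000} {
		byMask := n + (-n & (pageSize - 1)) // the "p += -p & (PageSize-1)" form
		fmt.Println(n, "->", roundUp(n, pageSize), byMask) // the two forms agree
	}
}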
diff --git a/src/runtime/malloc2.go b/src/runtime/malloc2.go
new file mode 100644
index 000000000..4ac0207b1
--- /dev/null
+++ b/src/runtime/malloc2.go
@@ -0,0 +1,473 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Memory allocator, based on tcmalloc.
+// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
+
+// The main allocator works in runs of pages.
+// Small allocation sizes (up to and including 32 kB) are
+// rounded to one of about 100 size classes, each of which
+// has its own free list of objects of exactly that size.
+// Any free page of memory can be split into a set of objects
+// of one size class, which are then managed using free list
+// allocators.
+//
+// The allocator's data structures are:
+//
+// FixAlloc: a free-list allocator for fixed-size objects,
+// used to manage storage used by the allocator.
+// MHeap: the malloc heap, managed at page (4096-byte) granularity.
+// MSpan: a run of pages managed by the MHeap.
+// MCentral: a shared free list for a given size class.
+// MCache: a per-thread (in Go, per-P) cache for small objects.
+// MStats: allocation statistics.
+//
+// Allocating a small object proceeds up a hierarchy of caches:
+//
+// 1. Round the size up to one of the small size classes
+// and look in the corresponding MCache free list.
+// If the list is not empty, allocate an object from it.
+// This can all be done without acquiring a lock.
+//
+// 2. If the MCache free list is empty, replenish it by
+// taking a bunch of objects from the MCentral free list.
+// Moving a bunch amortizes the cost of acquiring the MCentral lock.
+//
+// 3. If the MCentral free list is empty, replenish it by
+// allocating a run of pages from the MHeap and then
+// chopping that memory into objects of the given size.
+// Allocating many objects amortizes the cost of locking
+// the heap.
+//
+// 4. If the MHeap is empty or has no page runs large enough,
+// allocate a new group of pages (at least 1MB) from the
+// operating system. Allocating a large run of pages
+// amortizes the cost of talking to the operating system.
+//
+// Freeing a small object proceeds up the same hierarchy:
+//
+// 1. Look up the size class for the object and add it to
+// the MCache free list.
+//
+// 2. If the MCache free list is too long or the MCache has
+// too much memory, return some to the MCentral free lists.
+//
+// 3. If all the objects in a given span have returned to
+// the MCentral list, return that span to the page heap.
+//
+// 4. If the heap has too much memory, return some to the
+// operating system.
+//
+// TODO(rsc): Step 4 is not implemented.
+//
+// Allocating and freeing a large object uses the page heap
+// directly, bypassing the MCache and MCentral free lists.
+//
+// The small objects on the MCache and MCentral free lists
+// may or may not be zeroed. They are zeroed if and only if
+// the second word of the object is zero. A span in the
+// page heap is zeroed unless s->needzero is set. When a span
+// is allocated to break into small objects, it is zeroed if needed
+// and s->needzero is set. There are two main benefits to delaying the
+// zeroing this way:
+//
+// 1. stack frames allocated from the small object lists
+// or the page heap can avoid zeroing altogether.
+// 2. the cost of zeroing when reusing a small object is
+// charged to the mutator, not the garbage collector.
+//
+// This C code was written with an eye toward translating to Go
+// in the future. Methods have the form Type_Method(Type *t, ...).
+
+const (
+ _PageShift = 13
+ _PageSize = 1 << _PageShift
+ _PageMask = _PageSize - 1
+)
+
+const (
+ // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
+ _64bit = 1 << (^uintptr(0) >> 63) / 2
+
+ // Computed constant. The definition of MaxSmallSize and the
+ // algorithm in msize.c produce some number of different allocation
+ // size classes. NumSizeClasses is that number. It's needed here
+ // because there are static arrays of this length; when msize runs its
+ // size choosing algorithm it double-checks that NumSizeClasses agrees.
+ _NumSizeClasses = 67
+
+ // Tunable constants.
+ _MaxSmallSize = 32 << 10
+
+ // Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
+ _TinySize = 16
+ _TinySizeClass = 2
+
+ _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
+ _MaxMHeapList = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
+ _HeapAllocChunk = 1 << 20 // Chunk size for heap growth
+
+ // Per-P, per order stack segment cache size.
+ _StackCacheSize = 32 * 1024
+
+ // Number of orders that get caching. Order 0 is FixedStack
+ // and each successive order is twice as large.
+ _NumStackOrders = 3
+
+ // Number of bits in page to span calculations (4k pages).
+ // On Windows 64-bit we limit the arena to 32GB or 35 bits.
+ // Windows counts memory used by page table into committed memory
+ // of the process, so we can't reserve too much memory.
+ // See http://golang.org/issue/5402 and http://golang.org/issue/5236.
+ // On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
+ // On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
+ _MHeapMap_TotalBits = (_64bit*_Windows)*35 + (_64bit*(1-_Windows))*37 + (1-_64bit)*32
+ _MHeapMap_Bits = _MHeapMap_TotalBits - _PageShift
+
+ _MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
+
+ // Max number of threads to run garbage collection.
+ // 2, 3, and 4 are all plausible maximums depending
+ // on the hardware details of the machine. The garbage
+ // collector scales well to 32 cpus.
+ _MaxGcproc = 32
+)
+
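The constant block above derives the word size and arena limit entirely from constant expressions: _64bit is (1 << (^uintptr(0)>>63)) / 2, which evaluates to 1 on 64-bit targets and 0 on 32-bit ones, and on a 64-bit non-Windows target _MHeapMap_TotalBits comes out to 37, so _MaxMem is 2^37-1 bytes, the 128 GB arena used by mallocinit. The quick standalone check below reproduces that arithmetic; _Windows is assumed to be 0 here since its real definition lives elsewhere in the runtime.

// Standalone check of the constant arithmetic above: _64bit is derived
// from ^uintptr(0)>>63, and on a 64-bit non-Windows target
// _MHeapMap_TotalBits is 37, giving a ~128 GB _MaxMem. Not runtime code.
package main

import "fmt"

const (
	_Windows = 0 // assumed: non-Windows target for this check
	_64bit   = 1 << (^uintptr(0) >> 63) / 2

	_MHeapMap_TotalBits = (_64bit*_Windows)*35 + (_64bit*(1-_Windows))*37 + (1-_64bit)*32
	_MaxMem             = uintptr(1<<_MHeapMap_TotalBits - 1)
)

func main() {
	fmt.Println("_64bit =", _64bit)                  // 1 on a 64-bit build
	fmt.Println("total bits =", _MHeapMap_TotalBits) // 37 on 64-bit non-Windows
	fmt.Println("_MaxMem GB =", _MaxMem>>30)         // 127, i.e. 2^37-1 bytes (~128 GB)
}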
+// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
+type mlink struct {
+ next *mlink
+}
+
+// sysAlloc obtains a large chunk of zeroed memory from the
+// operating system, typically on the order of a hundred kilobytes
+// or a megabyte.
+// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by sysAlloc.
+//
+// SysUnused notifies the operating system that the contents
+// of the memory region are no longer needed and can be reused
+// for other purposes.
+// SysUsed notifies the operating system that the contents
+// of the memory region are needed again.
+//
+// SysFree returns it unconditionally; this is only used if
+// an out-of-memory error has been detected midway through
+// an allocation. It is okay if SysFree is a no-op.
+//
+// SysReserve reserves address space without allocating memory.
+// If the pointer passed to it is non-nil, the caller wants the
+// reservation there, but SysReserve can still choose another
+// location if that one is unavailable. On some systems and in some
+// cases SysReserve will simply check that the address space is
+// available and not actually reserve it. If SysReserve returns
+// non-nil, it sets *reserved to true if the address space is
+// reserved, false if it has merely been checked.
+// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by sysAlloc.
+//
+// SysMap maps previously reserved address space for use.
+// The reserved argument is true if the address space was really
+// reserved, not merely checked.
+//
+// SysFault marks a (already sysAlloc'd) region to fault
+// if accessed. Used only for debugging the runtime.
+
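As a rough illustration of how the reserve/map pair described above composes, here is a minimal sketch assuming the sysReserve/sysMap signatures added later in this CL (growArena is an invented name, not a runtime function):

// growArena reserves n more bytes of address space at a hinted address and
// then maps it for use, forwarding the "really reserved?" answer to sysMap.
func growArena(h *mheap, n uintptr) unsafe.Pointer {
	var reserved bool
	v := sysReserve(unsafe.Pointer(h.arena_end), n, &reserved)
	if v == nil {
		return nil // out of address space
	}
	// sysMap needs to know whether the space was truly reserved or merely
	// checked, so it can decide how strictly to insist on the address.
	sysMap(v, n, reserved, &memstats.heap_sys)
	return v
}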
+// FixAlloc is a simple free-list allocator for fixed size objects.
+// Malloc uses a FixAlloc wrapped around sysAlloc to manage its
+// MCache and MSpan objects.
+//
+// Memory returned by FixAlloc_Alloc is not zeroed.
+// The caller is responsible for locking around FixAlloc calls.
+// Callers can keep state in the object but the first word is
+// smashed by freeing and reallocating.
+type fixalloc struct {
+ size uintptr
+ first unsafe.Pointer // go func(unsafe.Pointer, unsafe.Pointer); f(arg, p) called the first time p is returned
+ arg unsafe.Pointer
+ list *mlink
+ chunk *byte
+ nchunk uint32
+ inuse uintptr // in-use bytes now
+ stat *uint64
+}
+
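A minimal sketch of the lock-around-FixAlloc pattern described above, in the style the Go files below (e.g. allocmcache) use; the wrapper name here is invented:

// allocSpecialProfileRecord takes a specialprofile record from the heap's
// fixalloc, holding the lock that guards that allocator. The memory is not
// zeroed and its first word may have been smashed by an earlier free, so the
// caller must initialize every field before use.
func allocSpecialProfileRecord() *specialprofile {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(fixAlloc_Alloc(&mheap_.specialprofilealloc))
	unlock(&mheap_.speciallock)
	return s
}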
+// Statistics.
+// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
+type mstats struct {
+ // General statistics.
+ alloc uint64 // bytes allocated and still in use
+ total_alloc uint64 // bytes allocated (even if freed)
+ sys uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
+ nlookup uint64 // number of pointer lookups
+ nmalloc uint64 // number of mallocs
+ nfree uint64 // number of frees
+
+ // Statistics about malloc heap.
+ // protected by mheap.lock
+ heap_alloc uint64 // bytes allocated and still in use
+ heap_sys uint64 // bytes obtained from system
+ heap_idle uint64 // bytes in idle spans
+ heap_inuse uint64 // bytes in non-idle spans
+ heap_released uint64 // bytes released to the os
+ heap_objects uint64 // total number of allocated objects
+
+ // Statistics about allocation of low-level fixed-size structures.
+ // Protected by FixAlloc locks.
+ stacks_inuse uint64 // this number is included in heap_inuse above
+ stacks_sys uint64 // always 0 in mstats
+ mspan_inuse uint64 // mspan structures
+ mspan_sys uint64
+ mcache_inuse uint64 // mcache structures
+ mcache_sys uint64
+ buckhash_sys uint64 // profiling bucket hash table
+ gc_sys uint64
+ other_sys uint64
+
+ // Statistics about garbage collector.
+ // Protected by mheap or stopping the world during GC.
+ next_gc uint64 // next gc (in heap_alloc time)
+ last_gc uint64 // last gc (in absolute time)
+ pause_total_ns uint64
+ pause_ns [256]uint64 // circular buffer of recent gc pause lengths
+ pause_end [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
+ numgc uint32
+ enablegc bool
+ debuggc bool
+
+ // Statistics about allocation size classes.
+
+ by_size [_NumSizeClasses]struct {
+ size uint32
+ nmalloc uint64
+ nfree uint64
+ }
+
+ tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to Go directly
+}
+
+var memstats mstats
+
+// Size classes. Computed and initialized by InitSizes.
+//
+// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
+// 1 <= sizeclass < NumSizeClasses, for n.
+// Size class 0 is reserved to mean "not small".
+//
+// class_to_size[i] = largest size in class i
+// class_to_allocnpages[i] = number of pages to allocate when
+// making new objects in class i
+
+var class_to_size [_NumSizeClasses]int32
+var class_to_allocnpages [_NumSizeClasses]int32
+var size_to_class8 [1024/8 + 1]int8
+var size_to_class128 [(_MaxSmallSize-1024)/128 + 1]int8
+
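The two tables split the small-size range at roughly 1KB: 8-byte granularity below it, 128-byte granularity above it. A sketch of the lookup, mirroring the algorithm in msize.c (sizeToClass here is a hypothetical helper name):

// sizeToClass maps a small allocation size to its size class using the
// tables above: 8-byte steps up to 1KB, 128-byte steps beyond that.
func sizeToClass(size uintptr) int32 {
	if size > _MaxSmallSize {
		gothrow("sizeToClass: invalid size")
	}
	if size > 1024-8 {
		return int32(size_to_class128[(size-1024+127)>>7])
	}
	return int32(size_to_class8[(size+7)>>3])
}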
+type mcachelist struct {
+ list *mlink
+ nlist uint32
+}
+
+type stackfreelist struct {
+ list *mlink // linked list of free stacks
+ size uintptr // total size of stacks in list
+}
+
+// Per-thread (in Go, per-P) cache for small objects.
+// No locking needed because it is per-thread (per-P).
+type mcache struct {
+ // The following members are accessed on every malloc,
+ // so they are grouped here for better caching.
+ next_sample int32 // trigger heap sample after allocating this many bytes
+ local_cachealloc intptr // bytes allocated (or freed) from cache since last lock of heap
+ // Allocator cache for tiny objects w/o pointers.
+ // See "Tiny allocator" comment in malloc.goc.
+ tiny *byte
+ tinysize uintptr
+ local_tinyallocs uintptr // number of tiny allocs not counted in other stats
+
+ // The rest is not accessed on every malloc.
+ alloc [_NumSizeClasses]*mspan // spans to allocate from
+
+ stackcache [_NumStackOrders]stackfreelist
+
+ sudogcache *sudog
+
+ // Local allocator stats, flushed during GC.
+ local_nlookup uintptr // number of pointer lookups
+ local_largefree uintptr // bytes freed for large objects (>maxsmallsize)
+ local_nlargefree uintptr // number of frees for large objects (>maxsmallsize)
+ local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
+}
+
+const (
+ _KindSpecialFinalizer = 1
+ _KindSpecialProfile = 2
+ // Note: The finalizer special must be first because if we're freeing
+ // an object, a finalizer special will cause the freeing operation
+ // to abort, and we want to keep the other special records around
+ // if that happens.
+)
+
+type special struct {
+ next *special // linked list in span
+ offset uint16 // span offset of object
+ kind byte // kind of special
+}
+
+// The described object has a finalizer set for it.
+type specialfinalizer struct {
+ special special
+ fn *funcval
+ nret uintptr
+ fint *_type
+ ot *ptrtype
+}
+
+// The described object is being heap profiled.
+type specialprofile struct {
+ special special
+ b *bucket
+}
+
+// An MSpan is a run of pages.
+const (
+ _MSpanInUse = iota // allocated for garbage collected heap
+ _MSpanStack // allocated for use by stack allocator
+ _MSpanFree
+ _MSpanListHead
+ _MSpanDead
+)
+
+type mspan struct {
+ next *mspan // in a span linked list
+ prev *mspan // in a span linked list
+ start pageID // starting page number
+ npages uintptr // number of pages in span
+ freelist *mlink // list of free objects
+ // sweep generation:
+ // if sweepgen == h->sweepgen - 2, the span needs sweeping
+ // if sweepgen == h->sweepgen - 1, the span is currently being swept
+ // if sweepgen == h->sweepgen, the span is swept and ready to use
+ // h->sweepgen is incremented by 2 after every GC
+ sweepgen uint32
+ ref uint16 // capacity - number of objects in freelist
+ sizeclass uint8 // size class
+ incache bool // being used by an mcache
+ state uint8 // mspaninuse etc
+ needzero uint8 // needs to be zeroed before allocation
+ elemsize uintptr // computed from sizeclass or from npages
+ unusedsince int64 // first time spotted by gc in mspanfree state
+ npreleased uintptr // number of pages released to the os
+ limit uintptr // end of data in span
+ speciallock mutex // guards specials list
+ specials *special // linked list of special records sorted by offset.
+}
+
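Spelled out, the sweepgen encoding in the struct above distinguishes three states relative to the heap's current generation; a small illustrative helper (not in the CL):

// spanNeedsSweep reports whether s must still be swept in the current cycle,
// per the sweepgen comment in mspan.
func spanNeedsSweep(s *mspan) bool {
	sg := mheap_.sweepgen
	switch s.sweepgen {
	case sg - 2:
		return true // unswept; a sweeper claims it by cas'ing sweepgen to sg-1
	case sg - 1:
		return false // currently being swept by someone else
	}
	return false // sg: already swept and ready to use
}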
+// Every MSpan is in one doubly-linked list,
+// either one of the MHeap's free lists or one of the
+// MCentral's span lists. We use empty MSpan structures as list heads.
+
+// Central list of free objects of a given size.
+type mcentral struct {
+ lock mutex
+ sizeclass int32
+ nonempty mspan // list of spans with a free object
+ empty mspan // list of spans with no free objects (or cached in an mcache)
+}
+
+// Main malloc heap.
+// The heap itself is the "free[]" and "large" arrays,
+// but all the other global data is here too.
+type mheap struct {
+ lock mutex
+ free [_MaxMHeapList]mspan // free lists of given length
+ freelarge mspan // free lists length >= _MaxMHeapList
+ busy [_MaxMHeapList]mspan // busy lists of large objects of given length
+ busylarge mspan // busy lists of large objects length >= _MaxMHeapList
+ allspans **mspan // all spans out there
+ gcspans **mspan // copy of allspans referenced by gc marker or sweeper
+ nspan uint32
+ sweepgen uint32 // sweep generation, see comment in mspan
+ sweepdone uint32 // all spans are swept
+
+ // span lookup
+ spans **mspan
+ spans_mapped uintptr
+
+ // range of addresses we might see in the heap
+ bitmap uintptr
+ bitmap_mapped uintptr
+ arena_start uintptr
+ arena_used uintptr
+ arena_end uintptr
+ arena_reserved bool
+
+ // central free lists for small size classes.
+ // the padding makes sure that the MCentrals are
+ // spaced CacheLineSize bytes apart, so that each MCentral.lock
+ // gets its own cache line.
+ central [_NumSizeClasses]struct {
+ mcentral mcentral
+ pad [_CacheLineSize]byte
+ }
+
+ spanalloc fixalloc // allocator for span*
+ cachealloc fixalloc // allocator for mcache*
+ specialfinalizeralloc fixalloc // allocator for specialfinalizer*
+ specialprofilealloc fixalloc // allocator for specialprofile*
+ speciallock mutex // lock for special record allocators.
+
+ // Malloc stats.
+ largefree uint64 // bytes freed for large objects (>maxsmallsize)
+ nlargefree uint64 // number of frees for large objects (>maxsmallsize)
+ nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
+}
+
+var mheap_ mheap
+
+const (
+ // flags to malloc
+ _FlagNoScan = 1 << 0 // GC doesn't have to scan object
+ _FlagNoZero = 1 << 1 // don't zero memory
+)
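These flags are OR'd together by allocation call sites. A hedged sketch of a caller, assuming the mallocgc(size, typ, flags) entry point defined in malloc.go elsewhere in this CL (newNoScanBuf is an invented name):

// newNoScanBuf allocates a pointer-free buffer. _FlagNoScan tells the GC it
// never has to scan the object; adding _FlagNoZero would also skip clearing,
// which is safe only if the caller overwrites every byte itself.
func newNoScanBuf(n uintptr) unsafe.Pointer {
	return mallocgc(n, nil, _FlagNoScan)
}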
+
+// NOTE: Layout known to queuefinalizer.
+type finalizer struct {
+ fn *funcval // function to call
+ arg unsafe.Pointer // ptr to object
+ nret uintptr // bytes of return values from fn
+ fint *_type // type of first argument of fn
+ ot *ptrtype // type of ptr to object
+}
+
+type finblock struct {
+ alllink *finblock
+ next *finblock
+ cnt int32
+ cap int32
+ fin [1]finalizer
+}
+
+// Information from the compiler about the layout of stack frames.
+type bitvector struct {
+ n int32 // # of bits
+ bytedata *uint8
+}
+
+type stackmap struct {
+ n int32 // number of bitmaps
+ nbit int32 // number of bits in each bitmap
+ bytedata [0]byte // bitmaps, each starting on a 32-bit boundary
+}
+
+// Returns pointer map data for the given stackmap index
+// (the index is encoded in PCDATA_StackMapIndex).
+
+// defined in mgc0.go
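A plausible reconstruction of that helper from the layout comment above (each bitmap padded to a 32-bit boundary); the actual definition lives in mgc0.go and may differ in detail:

// stackmapdata returns the n'th bitvector stored in stkmap. Each bitmap
// occupies (nbit+31)/32 32-bit words of bytedata.
func stackmapdata(stkmap *stackmap, n int32) bitvector {
	if n < 0 || n >= stkmap.n {
		gothrow("stackmapdata: index out of range")
	}
	off := uintptr(n) * uintptr((stkmap.nbit+31)/32) * 4
	p := unsafe.Pointer(uintptr(unsafe.Pointer(&stkmap.bytedata)) + off)
	return bitvector{stkmap.nbit, (*byte)(p)}
}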
diff --git a/src/runtime/mcache.c b/src/runtime/mcache.c
deleted file mode 100644
index 95ddced3e..000000000
--- a/src/runtime/mcache.c
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Per-P malloc cache for small objects.
-//
-// See malloc.h for an overview.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-
-extern volatile intgo runtime·MemProfileRate;
-
-// dummy MSpan that contains no free objects.
-MSpan runtime·emptymspan;
-
-MCache*
-runtime·allocmcache(void)
-{
- intgo rate;
- MCache *c;
- int32 i;
-
- runtime·lock(&runtime·mheap.lock);
- c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
- runtime·unlock(&runtime·mheap.lock);
- runtime·memclr((byte*)c, sizeof(*c));
- for(i = 0; i < NumSizeClasses; i++)
- c->alloc[i] = &runtime·emptymspan;
-
- // Set first allocation sample size.
- rate = runtime·MemProfileRate;
- if(rate > 0x3fffffff) // make 2*rate not overflow
- rate = 0x3fffffff;
- if(rate != 0)
- c->next_sample = runtime·fastrand1() % (2*rate);
-
- return c;
-}
-
-// mheap.lock needs to be held to release the gcworkbuf.
-static void
-freemcache(MCache *c)
-{
- runtime·MCache_ReleaseAll(c);
- runtime·stackcache_clear(c);
- runtime·lock(&runtime·mheap.lock);
- runtime·purgecachedstats(c);
- runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
- runtime·unlock(&runtime·mheap.lock);
-}
-
-static void
-freemcache_m(void)
-{
- MCache *c;
-
- c = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- freemcache(c);
-}
-
-void
-runtime·freemcache(MCache *c)
-{
- void (*fn)(void);
-
- g->m->ptrarg[0] = c;
- fn = freemcache_m;
- runtime·onM(&fn);
-}
-
-// Gets a span that has a free object in it and assigns it
-// to be the cached span for the given sizeclass. Returns this span.
-MSpan*
-runtime·MCache_Refill(MCache *c, int32 sizeclass)
-{
- MSpan *s;
-
- g->m->locks++;
- // Return the current cached span to the central lists.
- s = c->alloc[sizeclass];
- if(s->freelist != nil)
- runtime·throw("refill on a nonempty span");
- if(s != &runtime·emptymspan)
- s->incache = false;
-
- // Get a new cached span from the central lists.
- s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass].mcentral);
- if(s == nil)
- runtime·throw("out of memory");
- if(s->freelist == nil) {
- runtime·printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
- runtime·throw("empty span");
- }
- c->alloc[sizeclass] = s;
- g->m->locks--;
- return s;
-}
-
-void
-runtime·MCache_ReleaseAll(MCache *c)
-{
- int32 i;
- MSpan *s;
-
- for(i=0; i<NumSizeClasses; i++) {
- s = c->alloc[i];
- if(s != &runtime·emptymspan) {
- runtime·MCentral_UncacheSpan(&runtime·mheap.central[i].mcentral, s);
- c->alloc[i] = &runtime·emptymspan;
- }
- }
-}
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
new file mode 100644
index 000000000..08b1bc359
--- /dev/null
+++ b/src/runtime/mcache.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Per-P malloc cache for small objects.
+//
+// See malloc.h for an overview.
+
+package runtime
+
+import "unsafe"
+
+// dummy MSpan that contains no free objects.
+var emptymspan mspan
+
+func allocmcache() *mcache {
+ lock(&mheap_.lock)
+ c := (*mcache)(fixAlloc_Alloc(&mheap_.cachealloc))
+ unlock(&mheap_.lock)
+ memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
+ for i := 0; i < _NumSizeClasses; i++ {
+ c.alloc[i] = &emptymspan
+ }
+
+ // Set first allocation sample size.
+ rate := MemProfileRate
+ if rate > 0x3fffffff { // make 2*rate not overflow
+ rate = 0x3fffffff
+ }
+ if rate != 0 {
+ c.next_sample = int32(int(fastrand1()) % (2 * rate))
+ }
+
+ return c
+}
+
+func freemcache(c *mcache) {
+ systemstack(func() {
+ mCache_ReleaseAll(c)
+ stackcache_clear(c)
+
+ // NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
+ // with the stealing of gcworkbufs during garbage collection to avoid
+ // a race where the workbuf is double-freed.
+ // gcworkbuffree(c.gcworkbuf)
+
+ lock(&mheap_.lock)
+ purgecachedstats(c)
+ fixAlloc_Free(&mheap_.cachealloc, unsafe.Pointer(c))
+ unlock(&mheap_.lock)
+ })
+}
+
+// Gets a span that has a free object in it and assigns it
+// to be the cached span for the given sizeclass. Returns this span.
+func mCache_Refill(c *mcache, sizeclass int32) *mspan {
+ _g_ := getg()
+
+ _g_.m.locks++
+ // Return the current cached span to the central lists.
+ s := c.alloc[sizeclass]
+ if s.freelist != nil {
+ gothrow("refill on a nonempty span")
+ }
+ if s != &emptymspan {
+ s.incache = false
+ }
+
+ // Get a new cached span from the central lists.
+ s = mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
+ if s == nil {
+ gothrow("out of memory")
+ }
+ if s.freelist == nil {
+ println(s.ref, (s.npages<<_PageShift)/s.elemsize)
+ gothrow("empty span")
+ }
+ c.alloc[sizeclass] = s
+ _g_.m.locks--
+ return s
+}
+
+func mCache_ReleaseAll(c *mcache) {
+ for i := 0; i < _NumSizeClasses; i++ {
+ s := c.alloc[i]
+ if s != &emptymspan {
+ mCentral_UncacheSpan(&mheap_.central[i].mcentral, s)
+ c.alloc[i] = &emptymspan
+ }
+ }
+}
diff --git a/src/runtime/mcentral.c b/src/runtime/mcentral.c
deleted file mode 100644
index fe6bcfeb1..000000000
--- a/src/runtime/mcentral.c
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Central free lists.
-//
-// See malloc.h for an overview.
-//
-// The MCentral doesn't actually contain the list of free objects; the MSpan does.
-// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
-// and those that are completely allocated (c->empty).
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-
-static MSpan* MCentral_Grow(MCentral *c);
-
-// Initialize a single central free list.
-void
-runtime·MCentral_Init(MCentral *c, int32 sizeclass)
-{
- c->sizeclass = sizeclass;
- runtime·MSpanList_Init(&c->nonempty);
- runtime·MSpanList_Init(&c->empty);
-}
-
-// Allocate a span to use in an MCache.
-MSpan*
-runtime·MCentral_CacheSpan(MCentral *c)
-{
- MSpan *s;
- int32 cap, n;
- uint32 sg;
-
- runtime·lock(&c->lock);
- sg = runtime·mheap.sweepgen;
-retry:
- for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
- if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
- runtime·MSpanList_Remove(s);
- runtime·MSpanList_InsertBack(&c->empty, s);
- runtime·unlock(&c->lock);
- runtime·MSpan_Sweep(s, true);
- goto havespan;
- }
- if(s->sweepgen == sg-1) {
- // the span is being swept by background sweeper, skip
- continue;
- }
- // we have a nonempty span that does not require sweeping, allocate from it
- runtime·MSpanList_Remove(s);
- runtime·MSpanList_InsertBack(&c->empty, s);
- runtime·unlock(&c->lock);
- goto havespan;
- }
-
- for(s = c->empty.next; s != &c->empty; s = s->next) {
- if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
- // we have an empty span that requires sweeping,
- // sweep it and see if we can free some space in it
- runtime·MSpanList_Remove(s);
- // swept spans are at the end of the list
- runtime·MSpanList_InsertBack(&c->empty, s);
- runtime·unlock(&c->lock);
- runtime·MSpan_Sweep(s, true);
- if(s->freelist != nil)
- goto havespan;
- runtime·lock(&c->lock);
- // the span is still empty after sweep
- // it is already in the empty list, so just retry
- goto retry;
- }
- if(s->sweepgen == sg-1) {
- // the span is being swept by background sweeper, skip
- continue;
- }
- // already swept empty span,
- // all subsequent ones must also be either swept or in process of sweeping
- break;
- }
- runtime·unlock(&c->lock);
-
- // Replenish central list if empty.
- s = MCentral_Grow(c);
- if(s == nil)
- return nil;
- runtime·lock(&c->lock);
- runtime·MSpanList_InsertBack(&c->empty, s);
- runtime·unlock(&c->lock);
-
-havespan:
- // At this point s is a non-empty span, queued at the end of the empty list,
- // c is unlocked.
- cap = (s->npages << PageShift) / s->elemsize;
- n = cap - s->ref;
- if(n == 0)
- runtime·throw("empty span");
- if(s->freelist == nil)
- runtime·throw("freelist empty");
- s->incache = true;
- return s;
-}
-
-// Return span from an MCache.
-void
-runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
-{
- int32 cap, n;
-
- runtime·lock(&c->lock);
-
- s->incache = false;
-
- if(s->ref == 0)
- runtime·throw("uncaching full span");
-
- cap = (s->npages << PageShift) / s->elemsize;
- n = cap - s->ref;
- if(n > 0) {
- runtime·MSpanList_Remove(s);
- runtime·MSpanList_Insert(&c->nonempty, s);
- }
- runtime·unlock(&c->lock);
-}
-
-// Free n objects from a span s back into the central free list c.
-// Called during sweep.
-// Returns true if the span was returned to heap. Sets sweepgen to
-// the latest generation.
-// If preserve=true, don't return the span to heap nor relink in MCentral lists;
-// caller takes care of it.
-bool
-runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end, bool preserve)
-{
- bool wasempty;
-
- if(s->incache)
- runtime·throw("freespan into cached span");
-
- // Add the objects back to s's free list.
- wasempty = s->freelist == nil;
- end->next = s->freelist;
- s->freelist = start;
- s->ref -= n;
-
- if(preserve) {
- // preserve is set only when called from MCentral_CacheSpan above,
- // the span must be in the empty list.
- if(s->next == nil)
- runtime·throw("can't preserve unlinked span");
- runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);
- return false;
- }
-
- runtime·lock(&c->lock);
-
- // Move to nonempty if necessary.
- if(wasempty) {
- runtime·MSpanList_Remove(s);
- runtime·MSpanList_Insert(&c->nonempty, s);
- }
-
- // delay updating sweepgen until here. This is the signal that
- // the span may be used in an MCache, so it must come after the
- // linked list operations above (actually, just after the
- // lock of c above.)
- runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);
-
- if(s->ref != 0) {
- runtime·unlock(&c->lock);
- return false;
- }
-
- // s is completely freed, return it to the heap.
- runtime·MSpanList_Remove(s);
- s->needzero = 1;
- s->freelist = nil;
- runtime·unlock(&c->lock);
- runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
- runtime·MHeap_Free(&runtime·mheap, s, 0);
- return true;
-}
-
-// Fetch a new span from the heap and carve into objects for the free list.
-static MSpan*
-MCentral_Grow(MCentral *c)
-{
- uintptr size, npages, i, n;
- MLink **tailp, *v;
- byte *p;
- MSpan *s;
-
- npages = runtime·class_to_allocnpages[c->sizeclass];
- size = runtime·class_to_size[c->sizeclass];
- n = (npages << PageShift) / size;
- s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0, 1);
- if(s == nil)
- return nil;
-
- // Carve span into sequence of blocks.
- tailp = &s->freelist;
- p = (byte*)(s->start << PageShift);
- s->limit = p + size*n;
- for(i=0; i<n; i++) {
- v = (MLink*)p;
- *tailp = v;
- tailp = &v->next;
- p += size;
- }
- *tailp = nil;
- runtime·markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
- return s;
-}
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
new file mode 100644
index 000000000..0d172a08b
--- /dev/null
+++ b/src/runtime/mcentral.go
@@ -0,0 +1,199 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Central free lists.
+//
+// See malloc.h for an overview.
+//
+// The MCentral doesn't actually contain the list of free objects; the MSpan does.
+// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
+// and those that are completely allocated (c->empty).
+
+package runtime
+
+import "unsafe"
+
+// Initialize a single central free list.
+func mCentral_Init(c *mcentral, sizeclass int32) {
+ c.sizeclass = sizeclass
+ mSpanList_Init(&c.nonempty)
+ mSpanList_Init(&c.empty)
+}
+
+// Allocate a span to use in an MCache.
+func mCentral_CacheSpan(c *mcentral) *mspan {
+ lock(&c.lock)
+ sg := mheap_.sweepgen
+retry:
+ var s *mspan
+ for s = c.nonempty.next; s != &c.nonempty; s = s.next {
+ if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
+ mSpanList_Remove(s)
+ mSpanList_InsertBack(&c.empty, s)
+ unlock(&c.lock)
+ mSpan_Sweep(s, true)
+ goto havespan
+ }
+ if s.sweepgen == sg-1 {
+ // the span is being swept by background sweeper, skip
+ continue
+ }
+ // we have a nonempty span that does not require sweeping, allocate from it
+ mSpanList_Remove(s)
+ mSpanList_InsertBack(&c.empty, s)
+ unlock(&c.lock)
+ goto havespan
+ }
+
+ for s = c.empty.next; s != &c.empty; s = s.next {
+ if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
+ // we have an empty span that requires sweeping,
+ // sweep it and see if we can free some space in it
+ mSpanList_Remove(s)
+ // swept spans are at the end of the list
+ mSpanList_InsertBack(&c.empty, s)
+ unlock(&c.lock)
+ mSpan_Sweep(s, true)
+ if s.freelist != nil {
+ goto havespan
+ }
+ lock(&c.lock)
+ // the span is still empty after sweep
+ // it is already in the empty list, so just retry
+ goto retry
+ }
+ if s.sweepgen == sg-1 {
+ // the span is being swept by background sweeper, skip
+ continue
+ }
+ // already swept empty span,
+ // all subsequent ones must also be either swept or in process of sweeping
+ break
+ }
+ unlock(&c.lock)
+
+ // Replenish central list if empty.
+ s = mCentral_Grow(c)
+ if s == nil {
+ return nil
+ }
+ lock(&c.lock)
+ mSpanList_InsertBack(&c.empty, s)
+ unlock(&c.lock)
+
+ // At this point s is a non-empty span, queued at the end of the empty list,
+ // c is unlocked.
+havespan:
+ cap := int32((s.npages << _PageShift) / s.elemsize)
+ n := cap - int32(s.ref)
+ if n == 0 {
+ gothrow("empty span")
+ }
+ if s.freelist == nil {
+ gothrow("freelist empty")
+ }
+ s.incache = true
+ return s
+}
+
+// Return span from an MCache.
+func mCentral_UncacheSpan(c *mcentral, s *mspan) {
+ lock(&c.lock)
+
+ s.incache = false
+
+ if s.ref == 0 {
+ gothrow("uncaching full span")
+ }
+
+ cap := int32((s.npages << _PageShift) / s.elemsize)
+ n := cap - int32(s.ref)
+ if n > 0 {
+ mSpanList_Remove(s)
+ mSpanList_Insert(&c.nonempty, s)
+ }
+ unlock(&c.lock)
+}
+
+// Free n objects from a span s back into the central free list c.
+// Called during sweep.
+// Returns true if the span was returned to heap. Sets sweepgen to
+// the latest generation.
+// If preserve=true, don't return the span to heap nor relink in MCentral lists;
+// caller takes care of it.
+func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start *mlink, end *mlink, preserve bool) bool {
+ if s.incache {
+ gothrow("freespan into cached span")
+ }
+
+ // Add the objects back to s's free list.
+ wasempty := s.freelist == nil
+ end.next = s.freelist
+ s.freelist = start
+ s.ref -= uint16(n)
+
+ if preserve {
+ // preserve is set only when called from MCentral_CacheSpan above,
+ // the span must be in the empty list.
+ if s.next == nil {
+ gothrow("can't preserve unlinked span")
+ }
+ atomicstore(&s.sweepgen, mheap_.sweepgen)
+ return false
+ }
+
+ lock(&c.lock)
+
+ // Move to nonempty if necessary.
+ if wasempty {
+ mSpanList_Remove(s)
+ mSpanList_Insert(&c.nonempty, s)
+ }
+
+ // delay updating sweepgen until here. This is the signal that
+ // the span may be used in an MCache, so it must come after the
+ // linked list operations above (actually, just after the
+ // lock of c above.)
+ atomicstore(&s.sweepgen, mheap_.sweepgen)
+
+ if s.ref != 0 {
+ unlock(&c.lock)
+ return false
+ }
+
+ // s is completely freed, return it to the heap.
+ mSpanList_Remove(s)
+ s.needzero = 1
+ s.freelist = nil
+ unlock(&c.lock)
+ unmarkspan(uintptr(s.start)<<_PageShift, s.npages<<_PageShift)
+ mHeap_Free(&mheap_, s, 0)
+ return true
+}
+
+// Fetch a new span from the heap and carve into objects for the free list.
+func mCentral_Grow(c *mcentral) *mspan {
+ npages := uintptr(class_to_allocnpages[c.sizeclass])
+ size := uintptr(class_to_size[c.sizeclass])
+ n := (npages << _PageShift) / size
+
+ s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
+ if s == nil {
+ return nil
+ }
+
+ // Carve span into sequence of blocks.
+ tailp := &s.freelist
+ p := uintptr(s.start << _PageShift)
+ s.limit = p + size*n
+ for i := uintptr(0); i < n; i++ {
+ v := (*mlink)(unsafe.Pointer(p))
+ *tailp = v
+ tailp = &v.next
+ p += size
+ }
+ *tailp = nil
+ markspan(unsafe.Pointer(uintptr(s.start)<<_PageShift), size, n, size*n < s.npages<<_PageShift)
+ return s
+}
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index e6f1eb0e6..183567251 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -59,7 +59,11 @@ type MemStats struct {
}
}
-var sizeof_C_MStats uintptr // filled in by malloc.goc
+// Size of the trailing by_size array differs between Go and C,
+// and all data after by_size is local to runtime, not exported.
+// NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
+// sizeof_C_MStats is the size that C thinks the Go struct has.
+var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])
func init() {
var memStats MemStats
@@ -78,15 +82,16 @@ func ReadMemStats(m *MemStats) {
semacquire(&worldsema, false)
gp := getg()
gp.m.gcing = 1
- onM(stoptheworld)
+ systemstack(stoptheworld)
- gp.m.ptrarg[0] = noescape(unsafe.Pointer(m))
- onM(readmemstats_m)
+ systemstack(func() {
+ readmemstats_m(m)
+ })
gp.m.gcing = 0
gp.m.locks++
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
gp.m.locks--
}
@@ -95,14 +100,15 @@ func writeHeapDump(fd uintptr) {
semacquire(&worldsema, false)
gp := getg()
gp.m.gcing = 1
- onM(stoptheworld)
+ systemstack(stoptheworld)
- gp.m.scalararg[0] = fd
- onM(writeheapdump_m)
+ systemstack(func() {
+ writeheapdump_m(fd)
+ })
gp.m.gcing = 0
gp.m.locks++
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
gp.m.locks--
}
diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go
new file mode 100644
index 000000000..4bd40a39f
--- /dev/null
+++ b/src/runtime/mem_bsd.go
@@ -0,0 +1,88 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd openbsd solaris
+
+package runtime
+
+import "unsafe"
+
+//go:nosplit
+func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+ v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ if uintptr(v) < 4096 {
+ return nil
+ }
+ xadd64(stat, int64(n))
+ return v
+}
+
+func sysUnused(v unsafe.Pointer, n uintptr) {
+ madvise(v, n, _MADV_FREE)
+}
+
+func sysUsed(v unsafe.Pointer, n uintptr) {
+}
+
+func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
+ xadd64(stat, -int64(n))
+ munmap(v, n)
+}
+
+func sysFault(v unsafe.Pointer, n uintptr) {
+ mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
+}
+
+func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // and check the assumption in SysMap.
+ if ptrSize == 8 && uint64(n) > 1<<32 {
+ *reserved = false
+ return v
+ }
+
+ p := unsafe.Pointer(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ if uintptr(p) < 4096 {
+ return nil
+ }
+ *reserved = true
+ return p
+}
+
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+ const _ENOMEM = 12
+
+ xadd64(stat, int64(n))
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if !reserved {
+ flags := int32(_MAP_ANON | _MAP_PRIVATE)
+ if GOOS == "dragonfly" {
+ // TODO(jsing): For some reason DragonFly seems to return
+ // memory at a different address than we requested, even when
+ // there should be no reason for it to do so. This can be
+ // avoided by using MAP_FIXED, but I'm not sure we should need
+ // to do this - we do not on other platforms.
+ flags |= _MAP_FIXED
+ }
+ p := mmap(v, n, _PROT_READ|_PROT_WRITE, flags, -1, 0)
+ if uintptr(p) == _ENOMEM {
+ gothrow("runtime: out of memory")
+ }
+ if p != v {
+ print("runtime: address space conflict: map(", v, ") = ", p, "\n")
+ gothrow("runtime: address space conflict")
+ }
+ return
+ }
+
+ p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if uintptr(p) == _ENOMEM {
+ gothrow("runtime: out of memory")
+ }
+ if p != v {
+ gothrow("runtime: cannot map pages in arena address space")
+ }
+}
diff --git a/src/runtime/mem_darwin.c b/src/runtime/mem_darwin.c
deleted file mode 100644
index bf3ede577..000000000
--- a/src/runtime/mem_darwin.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *v;
-
- v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(v < (void*)4096)
- return nil;
- runtime·xadd64(stat, n);
- return v;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- // Linux's MADV_DONTNEED is like BSD's MADV_FREE.
- runtime·madvise(v, n, MADV_FREE);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- *reserved = true;
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096)
- return nil;
- return p;
-}
-
-enum
-{
- ENOMEM = 12,
-};
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- USED(reserved);
-
- runtime·xadd64(stat, n);
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_darwin.go
new file mode 100644
index 000000000..1bee933d0
--- /dev/null
+++ b/src/runtime/mem_darwin.go
@@ -0,0 +1,58 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//go:nosplit
+func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+ v := (unsafe.Pointer)(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ if uintptr(v) < 4096 {
+ return nil
+ }
+ xadd64(stat, int64(n))
+ return v
+}
+
+func sysUnused(v unsafe.Pointer, n uintptr) {
+ // Linux's MADV_DONTNEED is like BSD's MADV_FREE.
+ madvise(v, n, _MADV_FREE)
+}
+
+func sysUsed(v unsafe.Pointer, n uintptr) {
+}
+
+func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
+ xadd64(stat, -int64(n))
+ munmap(v, n)
+}
+
+func sysFault(v unsafe.Pointer, n uintptr) {
+ mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
+}
+
+func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
+ *reserved = true
+ p := (unsafe.Pointer)(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ if uintptr(p) < 4096 {
+ return nil
+ }
+ return p
+}
+
+const (
+ _ENOMEM = 12
+)
+
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+ xadd64(stat, int64(n))
+ p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
+ if uintptr(p) == _ENOMEM {
+ gothrow("runtime: out of memory")
+ }
+ if p != v {
+ gothrow("runtime: cannot map pages in arena address space")
+ }
+}
diff --git a/src/runtime/mem_dragonfly.c b/src/runtime/mem_dragonfly.c
deleted file mode 100644
index 11457b2c0..000000000
--- a/src/runtime/mem_dragonfly.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum
-{
- ENOMEM = 12,
-};
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *v;
-
- v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(v < (void*)4096)
- return nil;
- runtime·xadd64(stat, n);
- return v;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- runtime·madvise(v, n, MADV_FREE);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // and check the assumption in SysMap.
- if(sizeof(void*) == 8 && n > 1LL<<32) {
- *reserved = false;
- return v;
- }
-
- *reserved = true;
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096)
- return nil;
- return p;
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- runtime·xadd64(stat, n);
-
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if(!reserved) {
- // TODO(jsing): For some reason DragonFly seems to return
- // memory at a different address than we requested, even when
- // there should be no reason for it to do so. This can be
- // avoided by using MAP_FIXED, but I'm not sure we should need
- // to do this - we do not on other platforms.
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v) {
- runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
- runtime·throw("runtime: address space conflict");
- }
- return;
- }
-
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_freebsd.c b/src/runtime/mem_freebsd.c
deleted file mode 100644
index 18a9a2f5b..000000000
--- a/src/runtime/mem_freebsd.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum
-{
- ENOMEM = 12,
-};
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *v;
-
- v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(v < (void*)4096)
- return nil;
- runtime·xadd64(stat, n);
- return v;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- runtime·madvise(v, n, MADV_FREE);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // and check the assumption in SysMap.
- if(sizeof(void*) == 8 && n > 1LL<<32) {
- *reserved = false;
- return v;
- }
-
- *reserved = true;
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096)
- return nil;
- return p;
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- runtime·xadd64(stat, n);
-
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if(!reserved) {
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v) {
- runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
- runtime·throw("runtime: address space conflict");
- }
- return;
- }
-
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_linux.c b/src/runtime/mem_linux.c
deleted file mode 100644
index 52e02b34e..000000000
--- a/src/runtime/mem_linux.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum
-{
- _PAGE_SIZE = PhysPageSize,
- EACCES = 13,
-};
-
-static int32
-addrspace_free(void *v, uintptr n)
-{
- int32 errval;
- uintptr chunk;
- uintptr off;
-
- // NOTE: vec must be just 1 byte long here.
- // Mincore returns ENOMEM if any of the pages are unmapped,
- // but we want to know that all of the pages are unmapped.
- // To make these the same, we can only ask about one page
- // at a time. See golang.org/issue/7476.
- static byte vec[1];
-
- for(off = 0; off < n; off += chunk) {
- chunk = _PAGE_SIZE * sizeof vec;
- if(chunk > (n - off))
- chunk = n - off;
- errval = runtime·mincore((int8*)v + off, chunk, vec);
- // ENOMEM means unmapped, which is what we want.
- // Anything else we assume means the pages are mapped.
- if (errval != -ENOMEM && errval != ENOMEM) {
- return 0;
- }
- }
- return 1;
-}
-
-static void *
-mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
-{
- void *p;
-
- p = runtime·mmap(v, n, prot, flags, fd, offset);
- if(p != v) {
- if(p > (void*)4096) {
- runtime·munmap(p, n);
- p = nil;
- }
- // On some systems, mmap ignores v without
- // MAP_FIXED, so retry if the address space is free.
- if(addrspace_free(v, n))
- p = runtime·mmap(v, n, prot, flags|MAP_FIXED, fd, offset);
- }
- return p;
-}
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *p;
-
- p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096) {
- if(p == (void*)EACCES) {
- runtime·printf("runtime: mmap: access denied\n");
- runtime·printf("if you're running SELinux, enable execmem for this process.\n");
- runtime·exit(2);
- }
- if(p == (void*)EAGAIN) {
- runtime·printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
- runtime·exit(2);
- }
- return nil;
- }
- runtime·xadd64(stat, n);
- return p;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- runtime·madvise(v, n, MADV_DONTNEED);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // if we can reserve at least 64K and check the assumption in SysMap.
- // Only user-mode Linux (UML) rejects these requests.
- if(sizeof(void*) == 8 && n > 1LL<<32) {
- p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if (p != v) {
- if(p >= (void*)4096)
- runtime·munmap(p, 64<<10);
- return nil;
- }
- runtime·munmap(p, 64<<10);
- *reserved = false;
- return v;
- }
-
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if((uintptr)p < 4096)
- return nil;
- *reserved = true;
- return p;
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- runtime·xadd64(stat, n);
-
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if(!reserved) {
- p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v) {
- runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
- runtime·throw("runtime: address space conflict");
- }
- return;
- }
-
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
new file mode 100644
index 000000000..85b55ef49
--- /dev/null
+++ b/src/runtime/mem_linux.go
@@ -0,0 +1,135 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+const (
+ _PAGE_SIZE = _PhysPageSize
+ _EACCES = 13
+)
+
+// NOTE: vec must be just 1 byte long here.
+// Mincore returns ENOMEM if any of the pages are unmapped,
+// but we want to know that all of the pages are unmapped.
+// To make these the same, we can only ask about one page
+// at a time. See golang.org/issue/7476.
+var addrspace_vec [1]byte
+
+func addrspace_free(v unsafe.Pointer, n uintptr) bool {
+ var chunk uintptr
+ for off := uintptr(0); off < n; off += chunk {
+ chunk = _PAGE_SIZE * uintptr(len(addrspace_vec))
+ if chunk > (n - off) {
+ chunk = n - off
+ }
+ errval := mincore(unsafe.Pointer(uintptr(v)+off), chunk, &addrspace_vec[0])
+ // ENOMEM means unmapped, which is what we want.
+ // Anything else we assume means the pages are mapped.
+ if errval != -_ENOMEM {
+ return false
+ }
+ }
+ return true
+}
+
+func mmap_fixed(v unsafe.Pointer, n uintptr, prot, flags, fd int32, offset uint32) unsafe.Pointer {
+ p := mmap(v, n, prot, flags, fd, offset)
+ // On some systems, mmap ignores v without
+ // MAP_FIXED, so retry if the address space is free.
+ if p != v && addrspace_free(v, n) {
+ if uintptr(p) > 4096 {
+ munmap(p, n)
+ }
+ p = mmap(v, n, prot, flags|_MAP_FIXED, fd, offset)
+ }
+ return p
+}
+
+//go:nosplit
+func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+ p := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if uintptr(p) < 4096 {
+ if uintptr(p) == _EACCES {
+ print("runtime: mmap: access denied\n")
+ print("if you're running SELinux, enable execmem for this process.\n")
+ exit(2)
+ }
+ if uintptr(p) == _EAGAIN {
+ print("runtime: mmap: too much locked memory (check 'ulimit -l').\n")
+ exit(2)
+ }
+ return nil
+ }
+ xadd64(stat, int64(n))
+ return p
+}
+
+func sysUnused(v unsafe.Pointer, n uintptr) {
+ madvise(v, n, _MADV_DONTNEED)
+}
+
+func sysUsed(v unsafe.Pointer, n uintptr) {
+}
+
+func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
+ xadd64(stat, -int64(n))
+ munmap(v, n)
+}
+
+func sysFault(v unsafe.Pointer, n uintptr) {
+ mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
+}
+
+func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // if we can reserve at least 64K and check the assumption in SysMap.
+ // Only user-mode Linux (UML) rejects these requests.
+ if ptrSize == 8 && uint64(n) > 1<<32 {
+ p := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if p != v {
+ if uintptr(p) >= 4096 {
+ munmap(p, 64<<10)
+ }
+ return nil
+ }
+ munmap(p, 64<<10)
+ *reserved = false
+ return v
+ }
+
+ p := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if uintptr(p) < 4096 {
+ return nil
+ }
+ *reserved = true
+ return p
+}
+
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+ xadd64(stat, int64(n))
+
+ // On 64-bit, we don't actually have v reserved, so tread carefully.
+ if !reserved {
+ p := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if uintptr(p) == _ENOMEM {
+ gothrow("runtime: out of memory")
+ }
+ if p != v {
+ print("runtime: address space conflict: map(", v, ") = ", p, "\n")
+ gothrow("runtime: address space conflict")
+ }
+ return
+ }
+
+ p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
+ if uintptr(p) == _ENOMEM {
+ gothrow("runtime: out of memory")
+ }
+ if p != v {
+ gothrow("runtime: cannot map pages in arena address space")
+ }
+}
diff --git a/src/runtime/mem_netbsd.c b/src/runtime/mem_netbsd.c
deleted file mode 100644
index 31820e517..000000000
--- a/src/runtime/mem_netbsd.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum
-{
- ENOMEM = 12,
-};
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *v;
-
- v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(v < (void*)4096)
- return nil;
- runtime·xadd64(stat, n);
- return v;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- runtime·madvise(v, n, MADV_FREE);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // and check the assumption in SysMap.
- if(sizeof(void*) == 8 && n > 1LL<<32) {
- *reserved = false;
- return v;
- }
-
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096)
- return nil;
- *reserved = true;
- return p;
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- runtime·xadd64(stat, n);
-
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if(!reserved) {
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v) {
- runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
- runtime·throw("runtime: address space conflict");
- }
- return;
- }
-
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_openbsd.c b/src/runtime/mem_openbsd.c
deleted file mode 100644
index 31820e517..000000000
--- a/src/runtime/mem_openbsd.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum
-{
- ENOMEM = 12,
-};
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *v;
-
- v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(v < (void*)4096)
- return nil;
- runtime·xadd64(stat, n);
- return v;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- runtime·madvise(v, n, MADV_FREE);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // and check the assumption in SysMap.
- if(sizeof(void*) == 8 && n > 1LL<<32) {
- *reserved = false;
- return v;
- }
-
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096)
- return nil;
- *reserved = true;
- return p;
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- runtime·xadd64(stat, n);
-
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if(!reserved) {
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v) {
- runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
- runtime·throw("runtime: address space conflict");
- }
- return;
- }
-
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_solaris.c b/src/runtime/mem_solaris.c
deleted file mode 100644
index 8e90ba1d9..000000000
--- a/src/runtime/mem_solaris.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum
-{
- ENOMEM = 12,
-};
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- void *v;
-
- v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(v < (void*)4096)
- return nil;
- runtime·xadd64(stat, n);
- return v;
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- USED(v);
- USED(n);
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, -(uint64)n);
- runtime·munmap(v, n);
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- void *p;
-
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // and check the assumption in SysMap.
- if(sizeof(void*) == 8 && n > 1LL<<32) {
- *reserved = false;
- return v;
- }
-
- p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p < (void*)4096)
- return nil;
- *reserved = true;
- return p;
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- runtime·xadd64(stat, n);
-
- // On 64-bit, we don't actually have v reserved, so tread carefully.
- if(!reserved) {
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v) {
- runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
- runtime·throw("runtime: address space conflict");
- }
- return;
- }
-
- p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
- if(p == (void*)ENOMEM)
- runtime·throw("runtime: out of memory");
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_windows.c b/src/runtime/mem_windows.c
deleted file mode 100644
index 6ea992020..000000000
--- a/src/runtime/mem_windows.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "os_GOOS.h"
-#include "defs_GOOS_GOARCH.h"
-#include "malloc.h"
-#include "textflag.h"
-
-enum {
- MEM_COMMIT = 0x1000,
- MEM_RESERVE = 0x2000,
- MEM_DECOMMIT = 0x4000,
- MEM_RELEASE = 0x8000,
-
- PAGE_READWRITE = 0x0004,
- PAGE_NOACCESS = 0x0001,
-};
-
-#pragma dynimport runtime·VirtualAlloc VirtualAlloc "kernel32.dll"
-#pragma dynimport runtime·VirtualFree VirtualFree "kernel32.dll"
-#pragma dynimport runtime·VirtualProtect VirtualProtect "kernel32.dll"
-extern void *runtime·VirtualAlloc;
-extern void *runtime·VirtualFree;
-extern void *runtime·VirtualProtect;
-
-#pragma textflag NOSPLIT
-void*
-runtime·sysAlloc(uintptr n, uint64 *stat)
-{
- runtime·xadd64(stat, n);
- return runtime·stdcall4(runtime·VirtualAlloc, 0, n, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
-}
-
-void
-runtime·SysUnused(void *v, uintptr n)
-{
- void *r;
- uintptr small;
-
- r = runtime·stdcall3(runtime·VirtualFree, (uintptr)v, n, MEM_DECOMMIT);
- if(r != nil)
- return;
-
- // Decommit failed. Usual reason is that we've merged memory from two different
- // VirtualAlloc calls, and Windows will only let each VirtualFree handle pages from
- // a single VirtualAlloc. It is okay to specify a subset of the pages from a single alloc,
- // just not pages from multiple allocs. This is a rare case, arising only when we're
- // trying to give memory back to the operating system, which happens on a time
- // scale of minutes. It doesn't have to be terribly fast. Instead of extra bookkeeping
- // on all our VirtualAlloc calls, try freeing successively smaller pieces until
- // we manage to free something, and then repeat. This ends up being O(n log n)
- // in the worst case, but that's fast enough.
- while(n > 0) {
- small = n;
- while(small >= 4096 && runtime·stdcall3(runtime·VirtualFree, (uintptr)v, small, MEM_DECOMMIT) == nil)
- small = (small / 2) & ~(4096-1);
- if(small < 4096)
- runtime·throw("runtime: failed to decommit pages");
- v = (byte*)v + small;
- n -= small;
- }
-}
-
-void
-runtime·SysUsed(void *v, uintptr n)
-{
- void *r;
- uintptr small;
-
- r = runtime·stdcall4(runtime·VirtualAlloc, (uintptr)v, n, MEM_COMMIT, PAGE_READWRITE);
- if(r != v)
- runtime·throw("runtime: failed to commit pages");
-
- // Commit failed. See SysUnused.
- while(n > 0) {
- small = n;
- while(small >= 4096 && runtime·stdcall4(runtime·VirtualAlloc, (uintptr)v, small, MEM_COMMIT, PAGE_READWRITE) == nil)
- small = (small / 2) & ~(4096-1);
- if(small < 4096)
- runtime·throw("runtime: failed to decommit pages");
- v = (byte*)v + small;
- n -= small;
- }
-}
-
-void
-runtime·SysFree(void *v, uintptr n, uint64 *stat)
-{
- uintptr r;
-
- runtime·xadd64(stat, -(uint64)n);
- r = (uintptr)runtime·stdcall3(runtime·VirtualFree, (uintptr)v, 0, MEM_RELEASE);
- if(r == 0)
- runtime·throw("runtime: failed to release pages");
-}
-
-void
-runtime·SysFault(void *v, uintptr n)
-{
- // SysUnused makes the memory inaccessible and prevents its reuse
- runtime·SysUnused(v, n);
-}
-
-void*
-runtime·SysReserve(void *v, uintptr n, bool *reserved)
-{
- *reserved = true;
- // v is just a hint.
- // First try at v.
- v = runtime·stdcall4(runtime·VirtualAlloc, (uintptr)v, n, MEM_RESERVE, PAGE_READWRITE);
- if(v != nil)
- return v;
-
- // Next let the kernel choose the address.
- return runtime·stdcall4(runtime·VirtualAlloc, 0, n, MEM_RESERVE, PAGE_READWRITE);
-}
-
-void
-runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
-{
- void *p;
-
- USED(reserved);
-
- runtime·xadd64(stat, n);
- p = runtime·stdcall4(runtime·VirtualAlloc, (uintptr)v, n, MEM_COMMIT, PAGE_READWRITE);
- if(p != v)
- runtime·throw("runtime: cannot map pages in arena address space");
-}
diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go
new file mode 100644
index 000000000..a1dcad013
--- /dev/null
+++ b/src/runtime/mem_windows.go
@@ -0,0 +1,119 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const (
+ _MEM_COMMIT = 0x1000
+ _MEM_RESERVE = 0x2000
+ _MEM_DECOMMIT = 0x4000
+ _MEM_RELEASE = 0x8000
+
+ _PAGE_READWRITE = 0x0004
+ _PAGE_NOACCESS = 0x0001
+)
+
+//go:cgo_import_dynamic runtime._VirtualAlloc VirtualAlloc "kernel32.dll"
+//go:cgo_import_dynamic runtime._VirtualFree VirtualFree "kernel32.dll"
+//go:cgo_import_dynamic runtime._VirtualProtect VirtualProtect "kernel32.dll"
+
+var (
+ _VirtualAlloc,
+ _VirtualFree,
+ _VirtualProtect stdFunction
+)
+
+//go:nosplit
+func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
+ xadd64(stat, int64(n))
+ return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
+}
+
+func sysUnused(v unsafe.Pointer, n uintptr) {
+ r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
+ if r != 0 {
+ return
+ }
+
+ // Decommit failed. Usual reason is that we've merged memory from two different
+ // VirtualAlloc calls, and Windows will only let each VirtualFree handle pages from
+ // a single VirtualAlloc. It is okay to specify a subset of the pages from a single alloc,
+ // just not pages from multiple allocs. This is a rare case, arising only when we're
+ // trying to give memory back to the operating system, which happens on a time
+ // scale of minutes. It doesn't have to be terribly fast. Instead of extra bookkeeping
+ // on all our VirtualAlloc calls, try freeing successively smaller pieces until
+ // we manage to free something, and then repeat. This ends up being O(n log n)
+ // in the worst case, but that's fast enough.
+ for n > 0 {
+ small := n
+ for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
+ small /= 2
+ small &^= 4096 - 1
+ }
+ if small < 4096 {
+ gothrow("runtime: failed to decommit pages")
+ }
+ v = add(v, small)
+ n -= small
+ }
+}
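+
+// A minimal illustrative sketch (the helper name is hypothetical, not part of
+// the runtime API) of the shrink step used by the retry loops in sysUnused and
+// sysUsed: halve the piece and round it down to a 4096-byte page boundary,
+// giving up once it falls below one page. For example, 20480 shrinks to 8192,
+// then 4096, then 0.
+func shrinkRetrySize(small uintptr) uintptr {
+	small /= 2         // try a smaller piece
+	small &^= 4096 - 1 // rounded down to a page boundary
+	return small
+}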
+
+func sysUsed(v unsafe.Pointer, n uintptr) {
+ r := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
+	if r == uintptr(v) {
+		return
+	}
+
+	// Commit of the whole range failed. See sysUnused: fall back to committing
+	// successively smaller, page-aligned pieces.
+ for n > 0 {
+ small := n
+ for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
+ small /= 2
+ small &^= 4096 - 1
+ }
+ if small < 4096 {
+ gothrow("runtime: failed to decommit pages")
+ }
+ v = add(v, small)
+ n -= small
+ }
+}
+
+func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
+ xadd64(stat, -int64(n))
+ r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
+ if r == 0 {
+ gothrow("runtime: failed to release pages")
+ }
+}
+
+func sysFault(v unsafe.Pointer, n uintptr) {
+ // SysUnused makes the memory inaccessible and prevents its reuse
+ sysUnused(v, n)
+}
+
+func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
+ *reserved = true
+ // v is just a hint.
+ // First try at v.
+ v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
+ if v != nil {
+ return v
+ }
+
+ // Next let the kernel choose the address.
+ return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
+}
+
+func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
+ xadd64(stat, int64(n))
+ p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
+ if p != uintptr(v) {
+ gothrow("runtime: cannot map pages in arena address space")
+ }
+}
diff --git a/src/runtime/mfixalloc.c b/src/runtime/mfixalloc.c
deleted file mode 100644
index d670629da..000000000
--- a/src/runtime/mfixalloc.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Fixed-size object allocator. Returned memory is not zeroed.
-//
-// See malloc.h for overview.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-
-// Initialize f to allocate objects of the given size,
-// using the allocator to obtain chunks of memory.
-void
-runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat)
-{
- f->size = size;
- f->first = first;
- f->arg = arg;
- f->list = nil;
- f->chunk = nil;
- f->nchunk = 0;
- f->inuse = 0;
- f->stat = stat;
-}
-
-void*
-runtime·FixAlloc_Alloc(FixAlloc *f)
-{
- void *v;
-
- if(f->size == 0) {
- runtime·printf("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n");
- runtime·throw("runtime: internal error");
- }
-
- if(f->list) {
- v = f->list;
- f->list = *(void**)f->list;
- f->inuse += f->size;
- return v;
- }
- if(f->nchunk < f->size) {
- f->chunk = runtime·persistentalloc(FixAllocChunk, 0, f->stat);
- f->nchunk = FixAllocChunk;
- }
- v = f->chunk;
- if(f->first)
- f->first(f->arg, v);
- f->chunk += f->size;
- f->nchunk -= f->size;
- f->inuse += f->size;
- return v;
-}
-
-void
-runtime·FixAlloc_Free(FixAlloc *f, void *p)
-{
- f->inuse -= f->size;
- *(void**)p = f->list;
- f->list = p;
-}
-
diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go
new file mode 100644
index 000000000..b66a17e41
--- /dev/null
+++ b/src/runtime/mfixalloc.go
@@ -0,0 +1,59 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fixed-size object allocator. Returned memory is not zeroed.
+//
+// See malloc.h for overview.
+
+package runtime
+
+import "unsafe"
+
+// Initialize f to allocate objects of the given size,
+// using the allocator to obtain chunks of memory.
+func fixAlloc_Init(f *fixalloc, size uintptr, first func(unsafe.Pointer, unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
+ f.size = size
+ f.first = *(*unsafe.Pointer)(unsafe.Pointer(&first))
+ f.arg = arg
+ f.list = nil
+ f.chunk = nil
+ f.nchunk = 0
+ f.inuse = 0
+ f.stat = stat
+}
+
+func fixAlloc_Alloc(f *fixalloc) unsafe.Pointer {
+ if f.size == 0 {
+ print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
+ gothrow("runtime: internal error")
+ }
+
+ if f.list != nil {
+ v := unsafe.Pointer(f.list)
+ f.list = f.list.next
+ f.inuse += f.size
+ return v
+ }
+ if uintptr(f.nchunk) < f.size {
+ f.chunk = (*uint8)(persistentalloc(_FixAllocChunk, 0, f.stat))
+ f.nchunk = _FixAllocChunk
+ }
+
+ v := (unsafe.Pointer)(f.chunk)
+ if f.first != nil {
+ fn := *(*func(unsafe.Pointer, unsafe.Pointer))(unsafe.Pointer(&f.first))
+ fn(f.arg, v)
+ }
+ f.chunk = (*byte)(add(unsafe.Pointer(f.chunk), f.size))
+ f.nchunk -= uint32(f.size)
+ f.inuse += f.size
+ return v
+}
+
+func fixAlloc_Free(f *fixalloc, p unsafe.Pointer) {
+ f.inuse -= f.size
+ v := (*mlink)(p)
+ v.next = f.list
+ f.list = v
+}
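+
+// A minimal sketch (helper names are hypothetical, for illustration only) of
+// the intrusive free list that fixAlloc_Alloc and fixAlloc_Free maintain
+// above: a freed block's first word is reused as the next pointer, so the
+// free list needs no bookkeeping memory of its own.
+func freelistPush(list **mlink, p unsafe.Pointer) {
+	v := (*mlink)(p) // reuse the block itself as a list node
+	v.next = *list
+	*list = v
+}
+
+func freelistPop(list **mlink) unsafe.Pointer {
+	v := *list
+	if v == nil {
+		return nil
+	}
+	*list = v.next
+	return unsafe.Pointer(v)
+}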
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
new file mode 100644
index 000000000..57bd8b356
--- /dev/null
+++ b/src/runtime/mgc.go
@@ -0,0 +1,2422 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
+// It has gotten completely out of control.
+
+// Garbage collector (GC).
+//
+// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
+// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
+// non-generational and non-compacting. Allocation is done using size segregated per P allocation
+// areas to minimize fragmentation while eliminating locks in the common case.
+//
+// The algorithm decomposes into several steps.
+// This is a high level description of the algorithm being used. For an overview of GC a good
+// place to start is Richard Jones' gchandbook.org.
+//
+// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
+// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
+// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), 966-975.
+// For journal quality proofs that these steps are complete, correct, and terminate see
+// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
+// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
+//
+// 0. Set phase = GCscan from GCoff.
+// 1. Wait for all P's to acknowledge phase change.
+// At this point all goroutines have passed through a GC safepoint and
+// know we are in the GCscan phase.
+// 2. GC scans all goroutine stacks, marks and enqueues all encountered pointers
+// (marking avoids most duplicate enqueuing but races may produce duplication which is benign).
+// Preempted goroutines are scanned before P schedules next goroutine.
+// 3. Set phase = GCmark.
+// 4. Wait for all P's to acknowledge phase change.
+// 5. Now the write barrier marks and enqueues the white targets of pointer writes, whether the source object is black, grey, or white.
+// Malloc still allocates white (non-marked) objects.
+// 6. Meanwhile GC transitively walks the heap marking reachable objects.
+// 7. When GC finishes marking heap, it preempts P's one-by-one and
+// retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
+// currently scheduled on the P).
+// 8. Once the GC has exhausted all available marking work it sets phase = marktermination.
+// 9. Wait for all P's to acknowledge phase change.
+// 10. Malloc now allocates black objects, so number of unmarked reachable objects
+// monotonically decreases.
+// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet reachable objects.
+// 12. When GC completes a full cycle over P's and discovers no new grey
+// objects, (which means all reachable objects are marked) set phase = GCsweep.
+// 13. Wait for all P's to acknowledge phase change.
+// 14. Now malloc allocates white (but sweeps spans before use).
+// Write barrier becomes nop.
+// 15. GC does background sweeping, see description below.
+// 16. When sweeping is complete set phase to GCoff.
+// 17. When sufficient allocation has taken place replay the sequence starting at 0 above,
+// see discussion of GC rate below.
+
+// Changing phases.
+// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
+// All phase action must be benign in the presence of a change.
+// Starting with GCoff
+// GCoff to GCscan
+// GCscan scans stacks and globals, greying them, and never marks an object black.
+// Once all the P's are aware of the new phase they will scan gs on preemption.
+// This means that the scanning of preempted gs can't start until all the Ps
+// have acknowledged.
+// GCscan to GCmark
+// GCMark turns on the write barrier which also only greys objects. No scanning
+// of objects (making them black) can happen until all the Ps have acknowledged
+// the phase change.
+// GCmark to GCmarktermination
+// The only change here is that we start allocating black so the Ps must acknowledge
+// the change before we begin the termination algorithm
+// GCmarktermination to GCsweep
+// Objects currently on the freelist must be marked black for this to work.
+// Are things on the free lists black or white? How does the sweep phase work?
+
+// Concurrent sweep.
+// The sweep phase proceeds concurrently with normal program execution.
+// The heap is swept span-by-span both lazily (when a goroutine needs another span)
+// and concurrently in a background goroutine (this helps programs that are not CPU bound).
+// However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
+// and so next_gc calculation is tricky and happens as follows.
+// At the end of the stop-the-world phase next_gc is conservatively set based on total
+// heap size; all spans are marked as "needs sweeping".
+// Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
+// The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
+// closer to the target value. However, this is not enough to avoid over-allocating memory.
+// Consider that a goroutine wants to allocate a new span for a large object and
+// there are no free swept spans, but there are small-object unswept spans.
+// If the goroutine naively allocates a new span, it can surpass the yet-unknown
+// target next_gc value. In order to prevent such cases (1) when a goroutine needs
+// to allocate a new small-object span, it sweeps small-object spans for the same
+// object size until it frees at least one object; (2) when a goroutine needs to
+// allocate large-object span from heap, it sweeps spans until it frees at least
+// that many pages into heap. Together these two measures ensure that we don't surpass
+// target next_gc value by a large margin. There is an exception: if a goroutine sweeps
+// and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
+// but there can still be other one-page unswept spans which could be combined into a two-page span.
+// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
+// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
+// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
+// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
+// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
+// The finalizer goroutine is kicked off only when all spans are swept.
+// When the next GC starts, it sweeps all not-yet-swept spans (if any).
+
+// GC rate.
+// Next GC is after we've allocated an extra amount of memory proportional to
+// the amount already in use. The proportion is controlled by GOGC environment variable
+// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
+// (this mark is tracked in next_gc variable). This keeps the GC cost in linear
+// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
+// (and also the amount of extra memory used).
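+//
+// A worked example of this arithmetic, using the factor that also appears in
+// mSpan_Sweep below (next_gc is adjusted by size*(gcpercent+100)/100): with
+// the default GOGC=100, a 4M live heap yields a trigger of 4M*(100+100)/100 =
+// 8M, while GOGC=200 would push the trigger for the same live heap to 12M.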
+
+package runtime
+
+import "unsafe"
+
+const (
+ _DebugGC = 0
+ _DebugGCPtrs = false // if true, print trace of every pointer load during GC
+ _ConcurrentSweep = true
+
+ _WorkbufSize = 4 * 1024
+ _FinBlockSize = 4 * 1024
+ _RootData = 0
+ _RootBss = 1
+ _RootFinalizers = 2
+ _RootSpans = 3
+ _RootFlushCaches = 4
+ _RootCount = 5
+)
+
+// ptrmask for an allocation containing a single pointer.
+var oneptr = [...]uint8{bitsPointer}
+
+// Initialized from $GOGC. GOGC=off means no GC.
+var gcpercent int32
+
+// Holding worldsema grants an M the right to try to stop the world.
+// The procedure is:
+//
+// semacquire(&worldsema);
+// m.gcing = 1;
+// stoptheworld();
+//
+// ... do stuff ...
+//
+// m.gcing = 0;
+// semrelease(&worldsema);
+// starttheworld();
+//
+var worldsema uint32 = 1
+
+// It is a bug if bits does not have bitBoundary set, but
+// there are still some cases where this happens, related
+// to stack spans.
+type markbits struct {
+ bitp *byte // pointer to the byte holding xbits
+ shift uintptr // bits xbits needs to be shifted to get bits
+ xbits byte // byte holding all the bits from *bitp
+ bits byte // mark and boundary bits relevant to corresponding slot.
+ tbits byte // pointer||scalar bits relevant to corresponding slot.
+}
+
+type workbuf struct {
+ node lfnode // must be first
+ nobj uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(lfnode{}) - ptrSize) / ptrSize]uintptr
+}
+
+var data, edata, bss, ebss, gcdata, gcbss struct{}
+
+var finlock mutex // protects the following variables
+var fing *g // goroutine that runs finalizers
+var finq *finblock // list of finalizers that are to be executed
+var finc *finblock // cache of free blocks
+var finptrmask [_FinBlockSize / ptrSize / pointersPerByte]byte
+var fingwait bool
+var fingwake bool
+var allfin *finblock // list of all blocks
+
+var gcdatamask bitvector
+var gcbssmask bitvector
+
+var gclock mutex
+
+var badblock [1024]uintptr
+var nbadblock int32
+
+type workdata struct {
+ full uint64 // lock-free list of full blocks
+ empty uint64 // lock-free list of empty blocks
+ partial uint64 // lock-free list of partially filled blocks
+ pad0 [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
+ nproc uint32
+ tstart int64
+ nwait uint32
+ ndone uint32
+ alldone note
+ markfor *parfor
+
+ // Copy of mheap.allspans for marker or sweeper.
+ spans []*mspan
+}
+
+var work workdata
+
+//go:linkname weak_cgo_allocate go.weak.runtime._cgo_allocate_internal
+var weak_cgo_allocate byte
+
+// Is _cgo_allocate linked into the binary?
+func have_cgo_allocate() bool {
+ return &weak_cgo_allocate != nil
+}
+
+// To help debug the concurrent GC we remark with the world
+// stopped, ensuring that any object encountered has its normal
+// mark bit set. To do this we use an orthogonal bit
+// pattern to indicate the object is marked. The following pattern
+// uses the upper two bits in the object's boundary nibble.
+// 01: scalar not marked
+// 10: pointer not marked
+// 11: pointer marked
+// 00: scalar marked
+// Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
+// The higher bit is 1 for pointers and 0 for scalars, whether the object
+// is marked or not.
+// The first nibble no longer holds the bitsDead pattern indicating that
+// there are no more pointers in the object. This information is held
+// in the second nibble.
+
+// When marking an object if the bool checkmark is true one uses the above
+// encoding, otherwise one uses the bitMarked bit in the lower two bits
+// of the nibble.
+var (
+ checkmark = false
+ gccheckmarkenable = true
+)
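+
+// A minimal sketch (the helper is hypothetical, for illustration only) of the
+// checkmark flip described above: xoring the two type bits with 01 toggles
+// between the unmarked patterns (01 scalar, 10 pointer) and the marked
+// patterns (00 scalar, 11 pointer), in either direction.
+func checkmarkFlip(tbits byte) byte {
+	return tbits ^ 0x1 // 01<->00 and 10<->11
+}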
+
+// Is address b in the known heap? If it doesn't have a valid gcmap
+// it returns false. For example, pointers into stacks will return false.
+func inheap(b uintptr) bool {
+ if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
+ return false
+ }
+ // Not a beginning of a block, consult span table to find the block beginning.
+ k := b >> _PageShift
+ x := k
+ x -= mheap_.arena_start >> _PageShift
+ s := h_spans[x]
+ if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
+ return false
+ }
+ return true
+}
+
+// Given an address in the heap return the relevant byte from the gcmap. This routine
+// can be used on addresses to the start of an object or to the interior of an object.
+func slottombits(obj uintptr, mbits *markbits) {
+ off := (obj&^(ptrSize-1) - mheap_.arena_start) / ptrSize
+ mbits.bitp = (*byte)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
+ mbits.shift = off % wordsPerBitmapByte * gcBits
+ mbits.xbits = *mbits.bitp
+ mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
+ mbits.tbits = ((mbits.xbits >> mbits.shift) & bitPtrMask) >> 2
+}
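+
+// A worked example of the addressing above, assuming a 64-bit system
+// (ptrSize = 8), two heap words per bitmap byte, and 4-bit bitmap entries:
+// the heap word 3 slots past arena_start has off = 3, so its bits live in the
+// byte at arena_start - 3/2 - 1 = arena_start - 2, shifted up by 3%2 * 4 = 4
+// (the upper nibble). The bitmap grows downward from arena_start while the
+// heap itself grows upward.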
+
+// b is a pointer into the heap.
+// Find the start of the object referred to by b.
+// Set mbits to the associated bits from the bit map.
+// If b is not a valid heap object return nil and
+// undefined values in mbits.
+func objectstart(b uintptr, mbits *markbits) uintptr {
+ obj := b &^ (ptrSize - 1)
+ for {
+ slottombits(obj, mbits)
+ if mbits.bits&bitBoundary == bitBoundary {
+ break
+ }
+
+ // Not a beginning of a block, consult span table to find the block beginning.
+ k := b >> _PageShift
+ x := k
+ x -= mheap_.arena_start >> _PageShift
+ s := h_spans[x]
+ if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
+ if s != nil && s.state == _MSpanStack {
+ return 0 // This is legit.
+ }
+
+ // The following ensures that we are rigorous about what data
+ // structures hold valid pointers
+ if false {
+ // Still happens sometimes. We don't know why.
+ printlock()
+ print("runtime:objectstart Span weird: obj=", hex(obj), " k=", hex(k))
+ if s == nil {
+ print(" s=nil\n")
+ } else {
+ print(" s.start=", hex(s.start<<_PageShift), " s.limit=", hex(s.limit), " s.state=", s.state, "\n")
+ }
+ printunlock()
+ gothrow("objectstart: bad pointer in unexpected span")
+ }
+ return 0
+ }
+
+ p := uintptr(s.start) << _PageShift
+ if s.sizeclass != 0 {
+ size := s.elemsize
+ idx := (obj - p) / size
+ p = p + idx*size
+ }
+ if p == obj {
+ print("runtime: failed to find block beginning for ", hex(p), " s=", hex(s.start*_PageSize), " s.limit=", s.limit, "\n")
+ gothrow("failed to find block beginning")
+ }
+ obj = p
+ }
+
+ // if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit
+ // Clear any low bits to get to the start of the object.
+ // greyobject depends on this.
+ return obj
+}
+
+// Slow for now, as we serialize this; since this is on a debug path,
+// speed is not critical at this point.
+var andlock mutex
+
+func atomicand8(src *byte, val byte) {
+ lock(&andlock)
+ *src &= val
+ unlock(&andlock)
+}
+
+// Mark using the checkmark scheme.
+func docheckmark(mbits *markbits) {
+ // xor 01 moves 01(scalar unmarked) to 00(scalar marked)
+ // and 10(pointer unmarked) to 11(pointer marked)
+ if mbits.tbits == _BitsScalar {
+ atomicand8(mbits.bitp, ^byte(_BitsCheckMarkXor<<mbits.shift<<2))
+ } else if mbits.tbits == _BitsPointer {
+ atomicor8(mbits.bitp, byte(_BitsCheckMarkXor<<mbits.shift<<2))
+ }
+
+ // reload bits for ischeckmarked
+ mbits.xbits = *mbits.bitp
+ mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
+ mbits.tbits = ((mbits.xbits >> mbits.shift) & bitPtrMask) >> 2
+}
+
+// In the default scheme does mbits refer to a marked object.
+func ismarked(mbits *markbits) bool {
+ if mbits.bits&bitBoundary != bitBoundary {
+ gothrow("ismarked: bits should have boundary bit set")
+ }
+ return mbits.bits&bitMarked == bitMarked
+}
+
+// In the checkmark scheme does mbits refer to a marked object.
+func ischeckmarked(mbits *markbits) bool {
+ if mbits.bits&bitBoundary != bitBoundary {
+ gothrow("ischeckmarked: bits should have boundary bit set")
+ }
+ return mbits.tbits == _BitsScalarMarked || mbits.tbits == _BitsPointerMarked
+}
+
+// When in GCmarkterminate phase we allocate black.
+func gcmarknewobject_m(obj uintptr) {
+ if gcphase != _GCmarktermination {
+ gothrow("marking new object while not in mark termination phase")
+ }
+ if checkmark { // The world should be stopped so this should not happen.
+ gothrow("gcmarknewobject called while doing checkmark")
+ }
+
+ var mbits markbits
+ slottombits(obj, &mbits)
+ if mbits.bits&bitMarked != 0 {
+ return
+ }
+
+ // Each byte of GC bitmap holds info for two words.
+ // If the current object is larger than two words, or if the object is one word
+ // but the object it shares the byte with is already marked,
+ // then all the possible concurrent updates are trying to set the same bit,
+ // so we can use a non-atomic update.
+ if mbits.xbits&(bitMask|(bitMask<<gcBits)) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
+ *mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
+ } else {
+ atomicor8(mbits.bitp, bitMarked<<mbits.shift)
+ }
+}
+
+// obj is the start of an object with mark mbits.
+// If it isn't already marked, mark it and enqueue into workbuf.
+// Return possibly new workbuf to use.
+func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
+ // obj should be start of allocation, and so must be at least pointer-aligned.
+ if obj&(ptrSize-1) != 0 {
+ gothrow("greyobject: obj not pointer-aligned")
+ }
+
+ if checkmark {
+ if !ismarked(mbits) {
+ print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), ", mbits->bits=", hex(mbits.bits), " *mbits->bitp=", hex(*mbits.bitp), "\n")
+
+ k := obj >> _PageShift
+ x := k
+ x -= mheap_.arena_start >> _PageShift
+ s := h_spans[x]
+ printlock()
+ print("runtime:greyobject Span: obj=", hex(obj), " k=", hex(k))
+ if s == nil {
+ print(" s=nil\n")
+ } else {
+ print(" s.start=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
+ // NOTE(rsc): This code is using s.sizeclass as an approximation of the
+ // number of pointer-sized words in an object. Perhaps not what was intended.
+ for i := 0; i < int(s.sizeclass); i++ {
+ print(" *(obj+", i*ptrSize, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)*ptrSize))), "\n")
+ }
+ }
+ gothrow("checkmark found unmarked object")
+ }
+ if ischeckmarked(mbits) {
+ return wbuf
+ }
+ docheckmark(mbits)
+ if !ischeckmarked(mbits) {
+ print("mbits xbits=", hex(mbits.xbits), " bits=", hex(mbits.bits), " tbits=", hex(mbits.tbits), " shift=", mbits.shift, "\n")
+ gothrow("docheckmark and ischeckmarked disagree")
+ }
+ } else {
+ // If marked we have nothing to do.
+ if mbits.bits&bitMarked != 0 {
+ return wbuf
+ }
+
+ // Each byte of GC bitmap holds info for two words.
+ // If the current object is larger than two words, or if the object is one word
+ // but the object it shares the byte with is already marked,
+ // then all the possible concurrent updates are trying to set the same bit,
+ // so we can use a non-atomic update.
+ if mbits.xbits&(bitMask|bitMask<<gcBits) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
+ *mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
+ } else {
+ atomicor8(mbits.bitp, bitMarked<<mbits.shift)
+ }
+ }
+
+ if !checkmark && (mbits.xbits>>(mbits.shift+2))&_BitsMask == _BitsDead {
+ return wbuf // noscan object
+ }
+
+ // Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
+ // seems like a nice optimization that can be added back in.
+ // There needs to be time between the PREFETCH and the use.
+ // Previously we put the obj in an 8 element buffer that is drained at a rate
+ // to give the PREFETCH time to do its work.
+ // Use of PREFETCHNTA might be more appropriate than PREFETCH
+
+ // If workbuf is full, obtain an empty one.
+ if wbuf.nobj >= uintptr(len(wbuf.obj)) {
+ wbuf = getempty(wbuf)
+ }
+
+ wbuf.obj[wbuf.nobj] = obj
+ wbuf.nobj++
+ return wbuf
+}
+
+// Scan the object b of size n, adding pointers to wbuf.
+// Return possibly new wbuf to use.
+// If ptrmask != nil, it specifies where pointers are in b.
+// If ptrmask == nil, the GC bitmap should be consulted.
+// In this case, n may be an overestimate of the size; the GC bitmap
+// must also be used to make sure the scan stops at the end of b.
+func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {
+ arena_start := mheap_.arena_start
+ arena_used := mheap_.arena_used
+
+ // Find bits of the beginning of the object.
+ var ptrbitp unsafe.Pointer
+ var mbits markbits
+ if ptrmask == nil {
+ b = objectstart(b, &mbits)
+ if b == 0 {
+ return wbuf
+ }
+ ptrbitp = unsafe.Pointer(mbits.bitp)
+ }
+ for i := uintptr(0); i < n; i += ptrSize {
+ // Find bits for this word.
+ var bits uintptr
+ if ptrmask != nil {
+ // dense mask (stack or data)
+ bits = (uintptr(*(*byte)(add(unsafe.Pointer(ptrmask), (i/ptrSize)/4))) >> (((i / ptrSize) % 4) * bitsPerPointer)) & bitsMask
+ } else {
+ // Check if we have reached end of span.
+ // n is an overestimate of the size of the object.
+ if (b+i)%_PageSize == 0 && h_spans[(b-arena_start)>>_PageShift] != h_spans[(b+i-arena_start)>>_PageShift] {
+ break
+ }
+
+ // Consult GC bitmap.
+ bits = uintptr(*(*byte)(ptrbitp))
+ if wordsPerBitmapByte != 2 {
+ gothrow("alg doesn't work for wordsPerBitmapByte != 2")
+ }
+ j := (uintptr(b) + i) / ptrSize & 1 // j indicates upper nibble or lower nibble
+ bits >>= gcBits * j
+ if i == 0 {
+ bits &^= bitBoundary
+ }
+ ptrbitp = add(ptrbitp, -j)
+
+ if bits&bitBoundary != 0 && i != 0 {
+ break // reached beginning of the next object
+ }
+ bits = (bits & bitPtrMask) >> 2 // bits refer to the type bits.
+
+ if i != 0 && bits == bitsDead { // BitsDead in first nibble not valid during checkmark
+ break // reached no-scan part of the object
+ }
+ }
+
+ if bits <= _BitsScalar { // _BitsScalar, _BitsDead, _BitsScalarMarked
+ continue
+ }
+
+ if bits&_BitsPointer != _BitsPointer {
+ print("gc checkmark=", checkmark, " b=", hex(b), " ptrmask=", ptrmask, " mbits.bitp=", mbits.bitp, " mbits.xbits=", hex(mbits.xbits), " bits=", hex(bits), "\n")
+ gothrow("unexpected garbage collection bits")
+ }
+
+ obj := *(*uintptr)(unsafe.Pointer(b + i))
+
+ // At this point we have extracted the next potential pointer.
+ // Check if it points into heap.
+ if obj == 0 || obj < arena_start || obj >= arena_used {
+ continue
+ }
+
+		// Mark the object and return some important bits.
+		// If we combine the following two routines we don't have to pass mbits or obj around.
+ var mbits markbits
+ obj = objectstart(obj, &mbits)
+ if obj == 0 {
+ continue
+ }
+ wbuf = greyobject(obj, &mbits, wbuf)
+ }
+ return wbuf
+}
+
+// scanblock starts by scanning b as scanobject would.
+// If the gcphase is GCscan, that's all scanblock does.
+// Otherwise it traverses some fraction of the pointers it found in b, recursively.
+// As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
+// stopping only when no work is left in the system.
+func scanblock(b, n uintptr, ptrmask *uint8) {
+ wbuf := getpartialorempty()
+ if b != 0 {
+ wbuf = scanobject(b, n, ptrmask, wbuf)
+ if gcphase == _GCscan {
+ if inheap(b) && ptrmask == nil {
+ // b is in heap, we are in GCscan so there should be a ptrmask.
+ gothrow("scanblock: In GCscan phase and inheap is true.")
+ }
+ // GCscan only goes one level deep since mark wb not turned on.
+ putpartial(wbuf)
+ return
+ }
+ }
+ if gcphase == _GCscan {
+ gothrow("scanblock: In GCscan phase but no b passed in.")
+ }
+
+ keepworking := b == 0
+
+ // ptrmask can have 2 possible values:
+ // 1. nil - obtain pointer mask from GC bitmap.
+ // 2. pointer to a compact mask (for stacks and data).
+ for {
+ if wbuf.nobj == 0 {
+ if !keepworking {
+ putempty(wbuf)
+ return
+ }
+ // Refill workbuf from global queue.
+ wbuf = getfull(wbuf)
+ if wbuf == nil { // nil means out of work barrier reached
+ return
+ }
+
+ if wbuf.nobj <= 0 {
+ gothrow("runtime:scanblock getfull returns empty buffer")
+ }
+ }
+
+ // If another proc wants a pointer, give it some.
+ if work.nwait > 0 && wbuf.nobj > 4 && work.full == 0 {
+ wbuf = handoff(wbuf)
+ }
+
+ // This might be a good place to add prefetch code...
+ // if(wbuf->nobj > 4) {
+		//	PREFETCH(wbuf->obj[wbuf->nobj - 3]);
+ // }
+ wbuf.nobj--
+ b = wbuf.obj[wbuf.nobj]
+ wbuf = scanobject(b, mheap_.arena_used-b, nil, wbuf)
+ }
+}
+
+func markroot(desc *parfor, i uint32) {
+ // Note: if you add a case here, please also update heapdump.c:dumproots.
+ switch i {
+ case _RootData:
+ scanblock(uintptr(unsafe.Pointer(&data)), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)), gcdatamask.bytedata)
+
+ case _RootBss:
+ scanblock(uintptr(unsafe.Pointer(&bss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)), gcbssmask.bytedata)
+
+ case _RootFinalizers:
+ for fb := allfin; fb != nil; fb = fb.alllink {
+ scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0])
+ }
+
+ case _RootSpans:
+ // mark MSpan.specials
+ sg := mheap_.sweepgen
+ for spanidx := uint32(0); spanidx < uint32(len(work.spans)); spanidx++ {
+ s := work.spans[spanidx]
+ if s.state != mSpanInUse {
+ continue
+ }
+ if !checkmark && s.sweepgen != sg {
+ // sweepgen was updated (+2) during non-checkmark GC pass
+ print("sweep ", s.sweepgen, " ", sg, "\n")
+ gothrow("gc: unswept span")
+ }
+ for sp := s.specials; sp != nil; sp = sp.next {
+ if sp.kind != _KindSpecialFinalizer {
+ continue
+ }
+ // don't mark finalized object, but scan it so we
+ // retain everything it points to.
+ spf := (*specialfinalizer)(unsafe.Pointer(sp))
+ // A finalizer can be set for an inner byte of an object, find object beginning.
+ p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+ if gcphase != _GCscan {
+ scanblock(p, s.elemsize, nil) // scanned during mark phase
+ }
+ scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptr[0])
+ }
+ }
+
+ case _RootFlushCaches:
+ if gcphase != _GCscan { // Do not flush mcaches during GCscan phase.
+ flushallmcaches()
+ }
+
+ default:
+ // the rest is scanning goroutine stacks
+ if uintptr(i-_RootCount) >= allglen {
+ gothrow("markroot: bad index")
+ }
+ gp := allgs[i-_RootCount]
+
+ // remember when we've first observed the G blocked
+ // needed only to output in traceback
+ status := readgstatus(gp) // We are not in a scan state
+ if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
+ gp.waitsince = work.tstart
+ }
+
+ // Shrink a stack if not much of it is being used but not in the scan phase.
+ if gcphase != _GCscan { // Do not shrink during GCscan phase.
+ shrinkstack(gp)
+ }
+ if readgstatus(gp) == _Gdead {
+ gp.gcworkdone = true
+ } else {
+ gp.gcworkdone = false
+ }
+ restart := stopg(gp)
+
+ // goroutine will scan its own stack when it stops running.
+ // Wait until it has.
+ for readgstatus(gp) == _Grunning && !gp.gcworkdone {
+ }
+
+		// scanstack(gp) is done as part of gcphasework.
+		// But to make sure we have finished we need to make sure that
+		// the stack traps have all responded, so drop into
+		// this loop until they respond.
+ for !gp.gcworkdone {
+ status = readgstatus(gp)
+ if status == _Gdead {
+ gp.gcworkdone = true // scan is a noop
+ break
+ }
+ if status == _Gwaiting || status == _Grunnable {
+ restart = stopg(gp)
+ }
+ }
+ if restart {
+ restartg(gp)
+ }
+ }
+}
+
+// Get an empty work buffer off the work.empty list,
+// allocating new buffers as needed.
+func getempty(b *workbuf) *workbuf {
+ if b != nil {
+ putfull(b)
+ b = nil
+ }
+ if work.empty != 0 {
+ b = (*workbuf)(lfstackpop(&work.empty))
+ }
+ if b != nil && b.nobj != 0 {
+ _g_ := getg()
+ print("m", _g_.m.id, ": getempty: popped b=", b, " with non-zero b.nobj=", b.nobj, "\n")
+ gothrow("getempty: workbuffer not empty, b->nobj not 0")
+ }
+ if b == nil {
+ b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
+ b.nobj = 0
+ }
+ return b
+}
+
+func putempty(b *workbuf) {
+ if b.nobj != 0 {
+ gothrow("putempty: b->nobj not 0")
+ }
+ lfstackpush(&work.empty, &b.node)
+}
+
+func putfull(b *workbuf) {
+ if b.nobj <= 0 {
+ gothrow("putfull: b->nobj <= 0")
+ }
+ lfstackpush(&work.full, &b.node)
+}
+
+// Get a partially empty work buffer;
+// if none are available, get an empty one.
+func getpartialorempty() *workbuf {
+ b := (*workbuf)(lfstackpop(&work.partial))
+ if b == nil {
+ b = getempty(nil)
+ }
+ return b
+}
+
+func putpartial(b *workbuf) {
+ if b.nobj == 0 {
+ lfstackpush(&work.empty, &b.node)
+ } else if b.nobj < uintptr(len(b.obj)) {
+ lfstackpush(&work.partial, &b.node)
+ } else if b.nobj == uintptr(len(b.obj)) {
+ lfstackpush(&work.full, &b.node)
+ } else {
+ print("b=", b, " b.nobj=", b.nobj, " len(b.obj)=", len(b.obj), "\n")
+ gothrow("putpartial: bad Workbuf b.nobj")
+ }
+}
+
+// Get a full work buffer off the work.full or a partially
+// filled one off the work.partial list. If nothing is available
+// wait until all the other gc helpers have finished and then
+// return nil.
+// getfull acts as a barrier for work.nproc helpers. As long as one
+// gchelper is actively marking objects it
+// may create a workbuffer that the other helpers can work on.
+// The for loop either exits when a work buffer is found
+// or when _all_ of the work.nproc GC helpers are in the loop
+// looking for work and thus not capable of creating new work.
+// This is in fact the termination condition for the STW mark
+// phase.
+func getfull(b *workbuf) *workbuf {
+ if b != nil {
+ putempty(b)
+ }
+
+ b = (*workbuf)(lfstackpop(&work.full))
+ if b == nil {
+ b = (*workbuf)(lfstackpop(&work.partial))
+ }
+ if b != nil || work.nproc == 1 {
+ return b
+ }
+
+ xadd(&work.nwait, +1)
+ for i := 0; ; i++ {
+ if work.full != 0 {
+ xadd(&work.nwait, -1)
+ b = (*workbuf)(lfstackpop(&work.full))
+ if b == nil {
+ b = (*workbuf)(lfstackpop(&work.partial))
+ }
+ if b != nil {
+ return b
+ }
+ xadd(&work.nwait, +1)
+ }
+ if work.nwait == work.nproc {
+ return nil
+ }
+ _g_ := getg()
+ if i < 10 {
+ _g_.m.gcstats.nprocyield++
+ procyield(20)
+ } else if i < 20 {
+ _g_.m.gcstats.nosyield++
+ osyield()
+ } else {
+ _g_.m.gcstats.nsleep++
+ usleep(100)
+ }
+ }
+}
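+
+// A minimal sketch (hypothetical helper, for illustration only) of the backoff
+// ladder used by the wait loop above: spin briefly, then yield the OS thread,
+// then sleep, so idle helpers burn progressively less CPU while waiting for
+// the termination barrier.
+func markWaitBackoff(i int) {
+	switch {
+	case i < 10:
+		procyield(20) // busy-spin a handful of cycles
+	case i < 20:
+		osyield() // give up the rest of the OS time slice
+	default:
+		usleep(100) // sleep 100 microseconds
+	}
+}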
+
+func handoff(b *workbuf) *workbuf {
+ // Make new buffer with half of b's pointers.
+ b1 := getempty(nil)
+ n := b.nobj / 2
+ b.nobj -= n
+ b1.nobj = n
+ memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), n*unsafe.Sizeof(b1.obj[0]))
+ _g_ := getg()
+ _g_.m.gcstats.nhandoff++
+ _g_.m.gcstats.nhandoffcnt += uint64(n)
+
+ // Put b on full list - let first half of b get stolen.
+ lfstackpush(&work.full, &b.node)
+ return b1
+}
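+
+// For example, handing off a buffer holding 9 pointers moves its last 4 into a
+// fresh buffer returned to the caller and pushes the remaining 5 onto
+// work.full for other helpers to steal.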
+
+func stackmapdata(stkmap *stackmap, n int32) bitvector {
+ if n < 0 || n >= stkmap.n {
+ gothrow("stackmapdata: index out of range")
+ }
+ return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+31)/32*4))))}
+}
+
+// Scan a stack frame: local variables and function arguments/results.
+func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
+
+ f := frame.fn
+ targetpc := frame.continpc
+ if targetpc == 0 {
+ // Frame is dead.
+ return true
+ }
+ if _DebugGC > 1 {
+ print("scanframe ", gofuncname(f), "\n")
+ }
+ if targetpc != f.entry {
+ targetpc--
+ }
+ pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
+ if pcdata == -1 {
+ // We do not have a valid pcdata value but there might be a
+ // stackmap for this function. It is likely that we are looking
+ // at the function prologue, assume so and hope for the best.
+ pcdata = 0
+ }
+
+ // Scan local variables if stack frame has been allocated.
+ size := frame.varp - frame.sp
+ var minsize uintptr
+ if thechar != '6' && thechar != '8' {
+ minsize = ptrSize
+ } else {
+ minsize = 0
+ }
+ if size > minsize {
+ stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+ if stkmap == nil || stkmap.n <= 0 {
+ print("runtime: frame ", gofuncname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
+ gothrow("missing stackmap")
+ }
+
+ // Locals bitmap information, scan just the pointers in locals.
+ if pcdata < 0 || pcdata >= stkmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
+ gothrow("scanframe: bad symbol table")
+ }
+ bv := stackmapdata(stkmap, pcdata)
+ size = (uintptr(bv.n) * ptrSize) / bitsPerPointer
+ scanblock(frame.varp-size, uintptr(bv.n)/bitsPerPointer*ptrSize, bv.bytedata)
+ }
+
+ // Scan arguments.
+ if frame.arglen > 0 {
+ var bv bitvector
+ if frame.argmap != nil {
+ bv = *frame.argmap
+ } else {
+ stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
+ if stkmap == nil || stkmap.n <= 0 {
+ print("runtime: frame ", gofuncname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
+ gothrow("missing stackmap")
+ }
+ if pcdata < 0 || pcdata >= stkmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
+ gothrow("scanframe: bad symbol table")
+ }
+ bv = stackmapdata(stkmap, pcdata)
+ }
+ scanblock(frame.argp, uintptr(bv.n)/bitsPerPointer*ptrSize, bv.bytedata)
+ }
+ return true
+}
+
+func scanstack(gp *g) {
+ // TODO(rsc): Due to a precedence error, this was never checked in the original C version.
+ // If you enable the check, the gothrow happens.
+ /*
+ if readgstatus(gp)&_Gscan == 0 {
+ print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ gothrow("mark - bad status")
+ }
+ */
+
+ switch readgstatus(gp) &^ _Gscan {
+ default:
+ print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ gothrow("mark - bad status")
+ case _Gdead:
+ return
+ case _Grunning:
+ print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ gothrow("scanstack: goroutine not stopped")
+ case _Grunnable, _Gsyscall, _Gwaiting:
+ // ok
+ }
+
+ if gp == getg() {
+ gothrow("can't scan our own stack")
+ }
+ mp := gp.m
+ if mp != nil && mp.helpgc != 0 {
+ gothrow("can't scan gchelper stack")
+ }
+
+ gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
+ tracebackdefers(gp, scanframe, nil)
+}
+
+// If the slot is grey or black return true, if white return false.
+// If the slot is not in the known heap and thus does not have a valid GC bitmap then
+// it is considered grey. Globals and stacks can hold such slots.
+// The slot is grey if its mark bit is set and it is enqueued to be scanned.
+// The slot is black if it has already been scanned.
+// It is white if it has a valid mark bit and the bit is not set.
+func shaded(slot uintptr) bool {
+ if !inheap(slot) { // non-heap slots considered grey
+ return true
+ }
+
+ var mbits markbits
+ valid := objectstart(slot, &mbits)
+ if valid == 0 {
+ return true
+ }
+
+ if checkmark {
+ return ischeckmarked(&mbits)
+ }
+
+ return mbits.bits&bitMarked != 0
+}
+
+// Shade the object if it isn't already.
+// The object is not nil and known to be in the heap.
+func shade(b uintptr) {
+ if !inheap(b) {
+ gothrow("shade: passed an address not in the heap")
+ }
+
+ wbuf := getpartialorempty()
+ // Mark the object, return some important bits.
+	// If we combine the following two routines we don't have to pass mbits or obj around.
+ var mbits markbits
+ obj := objectstart(b, &mbits)
+ if obj != 0 {
+ wbuf = greyobject(obj, &mbits, wbuf) // augments the wbuf
+ }
+ putpartial(wbuf)
+}
+
+// This is the Dijkstra barrier coarsened to always shade the ptr (dst) object.
+// The original Dijkstra barrier only shaded ptrs being placed in black slots.
+//
+// Shade indicates that it has seen a white pointer by adding the referent
+// to wbuf as well as marking it.
+//
+// slot is the destination (dst) in go code
+// ptr is the value that goes into the slot (src) in the go code
+//
+// Dijkstra pointed out that maintaining the no-black-to-white-pointers
+// invariant means that white to white pointers need not
+// be noted by the write barrier. Furthermore, if either
+// white object dies before it is reached by the
+// GC then the object can be collected during this GC cycle
+// instead of waiting for the next cycle. Unfortunately the cost of
+// ensuring that the object holding the slot doesn't concurrently
+// change to black without the mutator noticing seems prohibitive.
+//
+// Consider the following example where the mutator writes into
+// a slot and then loads the slot's mark bit while the GC thread
+// writes to the slot's mark bit and then as part of scanning reads
+// the slot.
+//
+// Initially both [slot] and [slotmark] are 0 (nil)
+// Mutator thread GC thread
+// st [slot], ptr st [slotmark], 1
+//
+// ld r1, [slotmark] ld r2, [slot]
+//
+// This is a classic example of independent reads of independent writes,
+// aka IRIW. The question is if r1==r2==0 is allowed and for most HW the
+// answer is yes without inserting memory barriers between the st and the ld.
+// These barriers are expensive so we have decided that we will
+// always grey the ptr object regardless of the slot's color.
+func gcmarkwb_m(slot *uintptr, ptr uintptr) {
+ switch gcphase {
+ default:
+ gothrow("gcphasework in bad gcphase")
+
+ case _GCoff, _GCquiesce, _GCstw, _GCsweep, _GCscan:
+ // ok
+
+ case _GCmark, _GCmarktermination:
+ if ptr != 0 && inheap(ptr) {
+ shade(ptr)
+ }
+ }
+}
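+
+// For contrast, a minimal sketch of the original, uncoarsened Dijkstra barrier
+// described above. isBlack is a hypothetical predicate used only for
+// illustration; the coarsened barrier in gcmarkwb_m avoids having to answer
+// "is the slot's object black?" by always shading ptr instead.
+func dijkstraWriteBarrierSketch(slot *uintptr, ptr uintptr, isBlack func(*uintptr) bool) {
+	if ptr != 0 && inheap(ptr) && isBlack(slot) {
+		shade(ptr) // never install a pointer from a black object to a white one
+	}
+	*slot = ptr
+}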
+
+// The gp has been moved to a GC safepoint. GC phase specific
+// work is done here.
+func gcphasework(gp *g) {
+ switch gcphase {
+ default:
+ gothrow("gcphasework in bad gcphase")
+ case _GCoff, _GCquiesce, _GCstw, _GCsweep:
+ // No work.
+ case _GCscan:
+ // scan the stack, mark the objects, put pointers in work buffers
+ // hanging off the P where this is being run.
+ scanstack(gp)
+ case _GCmark:
+ // No work.
+ case _GCmarktermination:
+ scanstack(gp)
+ // All available mark work will be emptied before returning.
+ }
+ gp.gcworkdone = true
+}
+
+var finalizer1 = [...]byte{
+ // Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
+ // Each byte describes 4 words.
+ // Need 4 Finalizers described by 5 bytes before pattern repeats:
+ // ptr ptr uintptr ptr ptr
+ // ptr ptr uintptr ptr ptr
+ // ptr ptr uintptr ptr ptr
+ // ptr ptr uintptr ptr ptr
+ // aka
+ // ptr ptr uintptr ptr
+ // ptr ptr ptr uintptr
+ // ptr ptr ptr ptr
+ // uintptr ptr ptr ptr
+ // ptr uintptr ptr ptr
+ // Assumptions about Finalizer layout checked below.
+ bitsPointer | bitsPointer<<2 | bitsScalar<<4 | bitsPointer<<6,
+ bitsPointer | bitsPointer<<2 | bitsPointer<<4 | bitsScalar<<6,
+ bitsPointer | bitsPointer<<2 | bitsPointer<<4 | bitsPointer<<6,
+ bitsScalar | bitsPointer<<2 | bitsPointer<<4 | bitsPointer<<6,
+ bitsPointer | bitsScalar<<2 | bitsPointer<<4 | bitsPointer<<6,
+}
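+
+// The five bytes above cover lcm(5 words per Finalizer, 4 words per byte) = 20
+// words, i.e. exactly 4 Finalizers, which is why queuefinalizer below can fill
+// finptrmask simply by repeating the pattern (finalizer1[i%len(finalizer1)]).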
+
+func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
+ lock(&finlock)
+ if finq == nil || finq.cnt == finq.cap {
+ if finc == nil {
+ finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
+ finc.cap = int32((_FinBlockSize-unsafe.Sizeof(finblock{}))/unsafe.Sizeof(finalizer{}) + 1)
+ finc.alllink = allfin
+ allfin = finc
+ if finptrmask[0] == 0 {
+ // Build pointer mask for Finalizer array in block.
+ // Check assumptions made in finalizer1 array above.
+ if (unsafe.Sizeof(finalizer{}) != 5*ptrSize ||
+ unsafe.Offsetof(finalizer{}.fn) != 0 ||
+ unsafe.Offsetof(finalizer{}.arg) != ptrSize ||
+ unsafe.Offsetof(finalizer{}.nret) != 2*ptrSize ||
+ unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
+ unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize ||
+ bitsPerPointer != 2) {
+ gothrow("finalizer out of sync")
+ }
+ for i := range finptrmask {
+ finptrmask[i] = finalizer1[i%len(finalizer1)]
+ }
+ }
+ }
+ block := finc
+ finc = block.next
+ block.next = finq
+ finq = block
+ }
+ f := (*finalizer)(add(unsafe.Pointer(&finq.fin[0]), uintptr(finq.cnt)*unsafe.Sizeof(finq.fin[0])))
+ finq.cnt++
+ f.fn = fn
+ f.nret = nret
+ f.fint = fint
+ f.ot = ot
+ f.arg = p
+ fingwake = true
+ unlock(&finlock)
+}
+
+func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
+ for fb := allfin; fb != nil; fb = fb.alllink {
+ for i := int32(0); i < fb.cnt; i++ {
+ f := &fb.fin[i]
+ callback(f.fn, f.arg, f.nret, f.fint, f.ot)
+ }
+ }
+}
+
+// Returns only when span s has been swept.
+func mSpan_EnsureSwept(s *mspan) {
+ // Caller must disable preemption.
+ // Otherwise when this function returns the span can become unswept again
+ // (if GC is triggered on another goroutine).
+ _g_ := getg()
+ if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ gothrow("MSpan_EnsureSwept: m is not locked")
+ }
+
+ sg := mheap_.sweepgen
+ if atomicload(&s.sweepgen) == sg {
+ return
+ }
+ // The caller must be sure that the span is a MSpanInUse span.
+ if cas(&s.sweepgen, sg-2, sg-1) {
+ mSpan_Sweep(s, false)
+ return
+ }
+ // unfortunate condition, and we don't have efficient means to wait
+ for atomicload(&s.sweepgen) != sg {
+ osyield()
+ }
+}
+
+// Sweep frees or collects finalizers for blocks not marked in the mark phase.
+// It clears the mark bits in preparation for the next GC round.
+// Returns true if the span was returned to heap.
+// If preserve=true, don't return it to heap nor relink in MCentral lists;
+// caller takes care of it.
+func mSpan_Sweep(s *mspan, preserve bool) bool {
+ if checkmark {
+ gothrow("MSpan_Sweep: checkmark only runs in STW and after the sweep")
+ }
+
+ // It's critical that we enter this function with preemption disabled,
+ // GC must not start while we are in the middle of this function.
+ _g_ := getg()
+ if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ gothrow("MSpan_Sweep: m is not locked")
+ }
+ sweepgen := mheap_.sweepgen
+ if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
+ print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
+ gothrow("MSpan_Sweep: bad span state")
+ }
+ arena_start := mheap_.arena_start
+ cl := s.sizeclass
+ size := s.elemsize
+ var n int32
+ var npages int32
+ if cl == 0 {
+ n = 1
+ } else {
+ // Chunk full of small blocks.
+ npages = class_to_allocnpages[cl]
+ n = (npages << _PageShift) / int32(size)
+ }
+ res := false
+ nfree := 0
+ var head mlink
+ end := &head
+ c := _g_.m.mcache
+ sweepgenset := false
+
+ // Mark any free objects in this span so we don't collect them.
+ for link := s.freelist; link != nil; link = link.next {
+ off := (uintptr(unsafe.Pointer(link)) - arena_start) / ptrSize
+ bitp := arena_start - off/wordsPerBitmapByte - 1
+ shift := (off % wordsPerBitmapByte) * gcBits
+ *(*byte)(unsafe.Pointer(bitp)) |= bitMarked << shift
+ }
+
+ // Unlink & free special records for any objects we're about to free.
+ specialp := &s.specials
+ special := *specialp
+ for special != nil {
+ // A finalizer can be set for an inner byte of an object, find object beginning.
+ p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
+ off := (p - arena_start) / ptrSize
+ bitp := arena_start - off/wordsPerBitmapByte - 1
+ shift := (off % wordsPerBitmapByte) * gcBits
+ bits := (*(*byte)(unsafe.Pointer(bitp)) >> shift) & bitMask
+ if bits&bitMarked == 0 {
+			// Find the exact byte for which the special was set up
+ // (as opposed to object beginning).
+ p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
+ // about to free object: splice out special record
+ y := special
+ special = special.next
+ *specialp = special
+ if !freespecial(y, unsafe.Pointer(p), size, false) {
+ // stop freeing of object if it has a finalizer
+ *(*byte)(unsafe.Pointer(bitp)) |= bitMarked << shift
+ }
+ } else {
+ // object is still live: keep special record
+ specialp = &special.next
+ special = *specialp
+ }
+ }
+
+ // Sweep through n objects of given size starting at p.
+ // This thread owns the span now, so it can manipulate
+ // the block bitmap without atomic operations.
+ p := uintptr(s.start << _PageShift)
+ off := (p - arena_start) / ptrSize
+ bitp := arena_start - off/wordsPerBitmapByte - 1
+ shift := uint(0)
+ step := size / (ptrSize * wordsPerBitmapByte)
+ // Rewind to the previous quadruple as we move to the next
+ // in the beginning of the loop.
+ bitp += step
+ if step == 0 {
+ // 8-byte objects.
+ bitp++
+ shift = gcBits
+ }
+ for ; n > 0; n, p = n-1, p+size {
+ bitp -= step
+ if step == 0 {
+ if shift != 0 {
+ bitp--
+ }
+ shift = gcBits - shift
+ }
+
+ xbits := *(*byte)(unsafe.Pointer(bitp))
+ bits := (xbits >> shift) & bitMask
+
+ // Allocated and marked object, reset bits to allocated.
+ if bits&bitMarked != 0 {
+ *(*byte)(unsafe.Pointer(bitp)) &^= bitMarked << shift
+ continue
+ }
+
+ // At this point we know that we are looking at garbage object
+ // that needs to be collected.
+ if debug.allocfreetrace != 0 {
+ tracefree(unsafe.Pointer(p), size)
+ }
+
+ // Reset to allocated+noscan.
+ *(*byte)(unsafe.Pointer(bitp)) = uint8(uintptr(xbits&^((bitMarked|bitsMask<<2)<<shift)) | uintptr(bitsDead)<<(shift+2))
+ if cl == 0 {
+ // Free large span.
+ if preserve {
+ gothrow("can't preserve large span")
+ }
+ unmarkspan(p, s.npages<<_PageShift)
+ s.needzero = 1
+
+ // important to set sweepgen before returning it to heap
+ atomicstore(&s.sweepgen, sweepgen)
+ sweepgenset = true
+
+ // NOTE(rsc,dvyukov): The original implementation of efence
+ // in CL 22060046 used SysFree instead of SysFault, so that
+ // the operating system would eventually give the memory
+ // back to us again, so that an efence program could run
+ // longer without running out of memory. Unfortunately,
+ // calling SysFree here without any kind of adjustment of the
+ // heap data structures means that when the memory does
+ // come back to us, we have the wrong metadata for it, either in
+ // the MSpan structures or in the garbage collection bitmap.
+ // Using SysFault here means that the program will run out of
+ // memory fairly quickly in efence mode, but at least it won't
+ // have mysterious crashes due to confused memory reuse.
+ // It should be possible to switch back to SysFree if we also
+ // implement and then call some kind of MHeap_DeleteSpan.
+ if debug.efence > 0 {
+ s.limit = 0 // prevent mlookup from finding this span
+ sysFault(unsafe.Pointer(p), size)
+ } else {
+ mHeap_Free(&mheap_, s, 1)
+ }
+ c.local_nlargefree++
+ c.local_largefree += size
+ xadd64(&memstats.next_gc, -int64(size)*int64(gcpercent+100)/100)
+ res = true
+ } else {
+ // Free small object.
+ if size > 2*ptrSize {
+ *(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
+ } else if size > ptrSize {
+ *(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
+ }
+ end.next = (*mlink)(unsafe.Pointer(p))
+ end = end.next
+ nfree++
+ }
+ }
+
+ // We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
+ // because of the potential for a concurrent free/SetFinalizer.
+ // But we need to set it before we make the span available for allocation
+ // (return it to heap or mcentral), because allocation code assumes that a
+ // span is already swept if available for allocation.
+ if !sweepgenset && nfree == 0 {
+ // The span must be in our exclusive ownership until we update sweepgen,
+ // check for potential races.
+ if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
+ print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
+ gothrow("MSpan_Sweep: bad span state after sweep")
+ }
+ atomicstore(&s.sweepgen, sweepgen)
+ }
+ if nfree > 0 {
+ c.local_nsmallfree[cl] += uintptr(nfree)
+ c.local_cachealloc -= intptr(uintptr(nfree) * size)
+ xadd64(&memstats.next_gc, -int64(nfree)*int64(size)*int64(gcpercent+100)/100)
+ res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head.next, end, preserve)
+ // MCentral_FreeSpan updates sweepgen
+ }
+ return res
+}
+
+// State of background sweep.
+// Protected by gclock.
+type sweepdata struct {
+ g *g
+ parked bool
+ started bool
+
+ spanidx uint32 // background sweeper position
+
+ nbgsweep uint32
+ npausesweep uint32
+}
+
+var sweep sweepdata
+
+// sweeps one span
+// returns number of pages returned to heap, or ^uintptr(0) if there is nothing to sweep
+func sweepone() uintptr {
+ _g_ := getg()
+
+ // increment locks to ensure that the goroutine is not preempted
+ // in the middle of sweep thus leaving the span in an inconsistent state for next GC
+ _g_.m.locks++
+ sg := mheap_.sweepgen
+ for {
+ idx := xadd(&sweep.spanidx, 1) - 1
+ if idx >= uint32(len(work.spans)) {
+ mheap_.sweepdone = 1
+ _g_.m.locks--
+ return ^uintptr(0)
+ }
+ s := work.spans[idx]
+ if s.state != mSpanInUse {
+ s.sweepgen = sg
+ continue
+ }
+ if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
+ continue
+ }
+ npages := s.npages
+ if !mSpan_Sweep(s, false) {
+ npages = 0
+ }
+ _g_.m.locks--
+ return npages
+ }
+}
+
+func gosweepone() uintptr {
+ var ret uintptr
+ systemstack(func() {
+ ret = sweepone()
+ })
+ return ret
+}
+
+func gosweepdone() bool {
+ return mheap_.sweepdone != 0
+}
+
+func gchelper() {
+ _g_ := getg()
+ _g_.m.traceback = 2
+ gchelperstart()
+
+ // Parallel mark over GC roots.
+ parfordo(work.markfor)
+ if gcphase != _GCscan {
+ scanblock(0, 0, nil) // blocks in getfull
+ }
+
+ nproc := work.nproc // work.nproc can change right after we increment work.ndone
+ if xadd(&work.ndone, +1) == nproc-1 {
+ notewakeup(&work.alldone)
+ }
+ _g_.m.traceback = 0
+}
+
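+// gchelper relies on a "last one out signals completion" pattern: each helper
+// atomically bumps work.ndone, and the helper that brings it to nproc-1 wakes
+// the coordinator. A self-contained sketch of the same idea using sync/atomic
+// and a channel; runHelpers is an invented name rather than a runtime API, and
+// "sync/atomic" is assumed to be imported.
+func runHelpers(nproc int, work func()) {
+ if nproc <= 1 {
+ work()
+ return
+ }
+ var ndone uint32
+ alldone := make(chan struct{})
+ for i := 0; i < nproc-1; i++ {
+ go func() {
+ work()
+ if atomic.AddUint32(&ndone, 1) == uint32(nproc-1) {
+ close(alldone) // the last helper wakes the coordinator
+ }
+ }()
+ }
+ work()    // the coordinator contributes its own share of the work
+ <-alldone // then waits for all helpers to finish
+}
+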
+func cachestats() {
+ for i := 0; ; i++ {
+ p := allp[i]
+ if p == nil {
+ break
+ }
+ c := p.mcache
+ if c == nil {
+ continue
+ }
+ purgecachedstats(c)
+ }
+}
+
+func flushallmcaches() {
+ for i := 0; ; i++ {
+ p := allp[i]
+ if p == nil {
+ break
+ }
+ c := p.mcache
+ if c == nil {
+ continue
+ }
+ mCache_ReleaseAll(c)
+ stackcache_clear(c)
+ }
+}
+
+func updatememstats(stats *gcstats) {
+ if stats != nil {
+ *stats = gcstats{}
+ }
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if stats != nil {
+ src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
+ dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
+ for i, v := range src {
+ dst[i] += v
+ }
+ mp.gcstats = gcstats{}
+ }
+ }
+
+ memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
+ memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
+ memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
+ memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
+
+ // Calculate memory allocator stats.
+ // During program execution we only count the number of frees and the amount of freed memory.
+ // The current number of alive objects in the heap and the amount of alive heap memory
+ // are calculated by scanning all spans.
+ // The total number of mallocs is calculated as the number of frees plus the number of alive objects.
+ // Similarly, the total amount of allocated memory is calculated as the amount of freed memory
+ // plus the amount of alive heap memory.
+ memstats.alloc = 0
+ memstats.total_alloc = 0
+ memstats.nmalloc = 0
+ memstats.nfree = 0
+ for i := 0; i < len(memstats.by_size); i++ {
+ memstats.by_size[i].nmalloc = 0
+ memstats.by_size[i].nfree = 0
+ }
+
+ // Flush MCache's to MCentral.
+ systemstack(flushallmcaches)
+
+ // Aggregate local stats.
+ cachestats()
+
+ // Scan all spans and count number of alive objects.
+ lock(&mheap_.lock)
+ for i := uint32(0); i < mheap_.nspan; i++ {
+ s := h_allspans[i]
+ if s.state != mSpanInUse {
+ continue
+ }
+ if s.sizeclass == 0 {
+ memstats.nmalloc++
+ memstats.alloc += uint64(s.elemsize)
+ } else {
+ memstats.nmalloc += uint64(s.ref)
+ memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
+ memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
+ }
+ }
+ unlock(&mheap_.lock)
+
+ // Aggregate by size class.
+ smallfree := uint64(0)
+ memstats.nfree = mheap_.nlargefree
+ for i := 0; i < len(memstats.by_size); i++ {
+ memstats.nfree += mheap_.nsmallfree[i]
+ memstats.by_size[i].nfree = mheap_.nsmallfree[i]
+ memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
+ smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
+ }
+ memstats.nfree += memstats.tinyallocs
+ memstats.nmalloc += memstats.nfree
+
+ // Calculate derived stats.
+ memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
+ memstats.heap_alloc = memstats.alloc
+ memstats.heap_objects = memstats.nmalloc - memstats.nfree
+}
+
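+// The bookkeeping above relies on the identities nmalloc = nfree + live objects
+// and total_alloc = freed bytes + live bytes. A tiny worked sketch with
+// invented numbers, not taken from any real heap:
+func memstatsIdentityDemo() (nmalloc, totalAlloc uint64) {
+ const (
+ liveObjects uint64 = 150  // objects found alive by scanning spans
+ liveBytes   uint64 = 4096 // bytes held by those objects
+ nfree       uint64 = 50   // frees counted during program execution
+ freedBytes  uint64 = 1024 // bytes freed during program execution
+ )
+ nmalloc = nfree + liveObjects       // 200 mallocs in total
+ totalAlloc = freedBytes + liveBytes // 5120 bytes allocated in total
+ return
+}
+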
+func gcinit() {
+ if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
+ gothrow("runtime: size of Workbuf is suboptimal")
+ }
+
+ work.markfor = parforalloc(_MaxGcproc)
+ gcpercent = readgogc()
+ gcdatamask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcdata)), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
+ gcbssmask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcbss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
+}
+
+// Called from malloc.go using onM; stopping and starting the world are handled by the caller.
+func gc_m(start_time int64, eagersweep bool) {
+ _g_ := getg()
+ gp := _g_.m.curg
+ casgstatus(gp, _Grunning, _Gwaiting)
+ gp.waitreason = "garbage collection"
+
+ gc(start_time, eagersweep)
+ casgstatus(gp, _Gwaiting, _Grunning)
+}
+
+// Similar to clearcheckmarkbits but works on a single span.
+// It performs two tasks.
+// 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
+// for nibbles with the BoundaryBit set.
+// 2. When used after the checkmark phase it converts BitsPointerMarked (11) to BitsPointer (10) and
+// BitsScalarMarked (00) to BitsScalar (01), thus clearing the checkmark encoding.
+// For the second case it is possible to restore the BitsDead pattern but since
+// clearmark is a debug tool performance has a lower priority than simplicity.
+// The span is MSpanInUse and the world is stopped.
+func clearcheckmarkbitsspan(s *mspan) {
+ if s.state != _MSpanInUse {
+ print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
+ gothrow("clearcheckmarkbitsspan: bad span state")
+ }
+
+ arena_start := mheap_.arena_start
+ cl := s.sizeclass
+ size := s.elemsize
+ var n int32
+ if cl == 0 {
+ n = 1
+ } else {
+ // Chunk full of small blocks
+ npages := class_to_allocnpages[cl]
+ n = npages << _PageShift / int32(size)
+ }
+
+ // MSpan_Sweep has similar code but instead of overloading and
+ // complicating that routine we do a simpler walk here.
+ // Sweep through n objects of given size starting at p.
+ // This thread owns the span now, so it can manipulate
+ // the block bitmap without atomic operations.
+ p := uintptr(s.start) << _PageShift
+
+ // Find bits for the beginning of the span.
+ off := (p - arena_start) / ptrSize
+ bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
+ step := size / (ptrSize * wordsPerBitmapByte)
+
+ // The type bit values are:
+ // 00 - BitsDead, for us BitsScalarMarked
+ // 01 - BitsScalar
+ // 10 - BitsPointer
+ // 11 - unused, for us BitsPointerMarked
+ //
+ // When called to prepare for the checkmark phase (checkmark==1),
+ // we change BitsDead to BitsScalar, so that there are no BitsScalarMarked
+ // type bits anywhere.
+ //
+ // The checkmark phase marks by changing BitsScalar to BitsScalarMarked
+ // and BitsPointer to BitsPointerMarked.
+ //
+ // When called to clean up after the checkmark phase (checkmark==0),
+ // we unmark by changing BitsScalarMarked back to BitsScalar and
+ // BitsPointerMarked back to BitsPointer.
+ //
+ // There are two problems with the scheme as just described.
+ // First, the setup rewrites BitsDead to BitsScalar, but the type bits
+ // following a BitsDead are uninitialized and must not be used.
+ // Second, objects that are free are expected to have their type
+ // bits zeroed (BitsDead), so in the cleanup we need to restore
+ // any BitsDeads that were there originally.
+ //
+ // In a one-word object (8-byte allocation on 64-bit system),
+ // there is no difference between BitsScalar and BitsDead, because
+ // neither is a pointer and there are no more words in the object,
+ // so using BitsScalar during the checkmark is safe and mapping
+ // both back to BitsDead during cleanup is also safe.
+ //
+ // In a larger object, we need to be more careful. During setup,
+ // if the type of the first word is BitsDead, we change it to BitsScalar
+ // (as we must) but also initialize the type of the second
+ // word to BitsDead, so that a scan during the checkmark phase
+ // will still stop before seeing the uninitialized type bits in the
+ // rest of the object. The sequence 'BitsScalar BitsDead' never
+ // happens in real type bitmaps - BitsDead is always as early
+ // as possible, so immediately after the last BitsPointer.
+ // During cleanup, if we see a BitsScalar, we can check to see if it
+ // is followed by BitsDead. If so, it was originally BitsDead and
+ // we can change it back.
+
+ if step == 0 {
+ // updating top and bottom nibbles, all boundaries
+ for i := int32(0); i < n/2; i, bitp = i+1, addb(bitp, uintptrMask&-1) {
+ if *bitp&bitBoundary == 0 {
+ gothrow("missing bitBoundary")
+ }
+ b := (*bitp & bitPtrMask) >> 2
+ if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
+ *bitp &^= 0x0c // convert to _BitsDead
+ } else if b == _BitsScalarMarked || b == _BitsPointerMarked {
+ *bitp &^= _BitsCheckMarkXor << 2
+ }
+
+ if (*bitp>>gcBits)&bitBoundary == 0 {
+ gothrow("missing bitBoundary")
+ }
+ b = ((*bitp >> gcBits) & bitPtrMask) >> 2
+ if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
+ *bitp &^= 0xc0 // convert to _BitsDead
+ } else if b == _BitsScalarMarked || b == _BitsPointerMarked {
+ *bitp &^= _BitsCheckMarkXor << (2 + gcBits)
+ }
+ }
+ } else {
+ // updating bottom nibble for first word of each object
+ for i := int32(0); i < n; i, bitp = i+1, addb(bitp, -step) {
+ if *bitp&bitBoundary == 0 {
+ gothrow("missing bitBoundary")
+ }
+ b := (*bitp & bitPtrMask) >> 2
+
+ if checkmark && b == _BitsDead {
+ // move BitsDead into second word.
+ // set bits to BitsScalar in preparation for checkmark phase.
+ *bitp &^= 0xc0
+ *bitp |= _BitsScalar << 2
+ } else if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) && *bitp&0xc0 == 0 {
+ // Cleaning up after checkmark phase.
+ // First word is scalar or dead (we can no longer tell which)
+ // and second word is dead.
+ // First word might as well be dead too.
+ *bitp &^= 0x0c
+ } else if b == _BitsScalarMarked || b == _BitsPointerMarked {
+ *bitp ^= _BitsCheckMarkXor << 2
+ }
+ }
+ }
+}
+
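+// The checkmark encoding described above can be exercised in isolation: with
+// the 2-bit type values (00 BitsDead/BitsScalarMarked, 01 BitsScalar,
+// 10 BitsPointer, 11 BitsPointerMarked), XORing with 01 toggles an object
+// between its normal and checkmarked encodings. A minimal sketch using local
+// stand-in constants rather than the runtime's own declarations:
+func checkmarkToggleDemo() bool {
+ const (
+ bitsScalar        = 0x1 // 01
+ bitsPointer       = 0x2 // 10
+ bitsScalarMarked  = 0x0 // 00
+ bitsPointerMarked = 0x3 // 11
+ checkMarkXor      = 0x1 // the XOR mask that flips marked/unmarked
+ )
+ scalarOK := bitsScalar^checkMarkXor == bitsScalarMarked &&
+ bitsScalarMarked^checkMarkXor == bitsScalar
+ pointerOK := bitsPointer^checkMarkXor == bitsPointerMarked &&
+ bitsPointerMarked^checkMarkXor == bitsPointer
+ return scalarOK && pointerOK
+}
+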
+// clearcheckmarkbits performs two tasks.
+// 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
+// for nibbles with the BoundaryBit set.
+// 2. When used after the checkmark phase it converts BitsPointerMarked (11) to BitsPointer (10) and
+// BitsScalarMarked (00) to BitsScalar (01), thus clearing the checkmark encoding.
+// This is a bit expensive but preserves the BitsDead encoding during the normal marking.
+// BitsDead remains valid for every nibble except the ones with BitsBoundary set.
+func clearcheckmarkbits() {
+ for _, s := range work.spans {
+ if s.state == _MSpanInUse {
+ clearcheckmarkbitsspan(s)
+ }
+ }
+}
+
+// Called from malloc.go using onM.
+// The world is stopped. Rerun the scan and mark phases
+// using the bitMarkedCheck bit instead of the
+// bitMarked bit. If the marking encounters a
+// bitMarked bit that is not set, we throw.
+func gccheckmark_m(startTime int64, eagersweep bool) {
+ if !gccheckmarkenable {
+ return
+ }
+
+ if checkmark {
+ gothrow("gccheckmark_m, entered with checkmark already true")
+ }
+
+ checkmark = true
+ clearcheckmarkbits() // Converts BitsDead to BitsScalar.
+ gc_m(startTime, eagersweep) // turns off checkmark
+ // Work done, fixed up the GC bitmap to remove the checkmark bits.
+ clearcheckmarkbits()
+}
+
+func gccheckmarkenable_m() {
+ gccheckmarkenable = true
+}
+
+func gccheckmarkdisable_m() {
+ gccheckmarkenable = false
+}
+
+func finishsweep_m() {
+ // The world is stopped so we should be able to complete the sweeps
+ // quickly.
+ for sweepone() != ^uintptr(0) {
+ sweep.npausesweep++
+ }
+
+ // There may be some other spans being swept concurrently that
+ // we need to wait for. If finishsweep_m is done with the world stopped
+ // this code is not required.
+ sg := mheap_.sweepgen
+ for _, s := range work.spans {
+ if s.sweepgen != sg && s.state == _MSpanInUse {
+ mSpan_EnsureSwept(s)
+ }
+ }
+}
+
+// Scan all of the stacks, greying (or graying if in America) the referents
+// but not blackening them since the mark write barrier isn't installed.
+func gcscan_m() {
+ _g_ := getg()
+
+ // Grab the g that called us and potentially allow rescheduling.
+ // This allows it to be scanned like other goroutines.
+ mastergp := _g_.m.curg
+ casgstatus(mastergp, _Grunning, _Gwaiting)
+ mastergp.waitreason = "garbage collection scan"
+
+ // Span sweeping has been done by finishsweep_m.
+ // Long term we will want to make this goroutine runnable
+ // by placing it onto a scanenqueue state and then calling
+ // runtime·restartg(mastergp) to make it Grunnable.
+ // At the bottom we will want to return this p back to the scheduler.
+ oldphase := gcphase
+
+ // Prepare flag indicating that the scan has not been completed.
+ lock(&allglock)
+ local_allglen := allglen
+ for i := uintptr(0); i < local_allglen; i++ {
+ gp := allgs[i]
+ gp.gcworkdone = false // set to true in gcphasework
+ }
+ unlock(&allglock)
+
+ work.nwait = 0
+ work.ndone = 0
+ work.nproc = 1 // For now do not do this in parallel.
+ gcphase = _GCscan
+ // ackgcphase is not needed since we are not scanning running goroutines.
+ parforsetup(work.markfor, work.nproc, uint32(_RootCount+local_allglen), nil, false, markroot)
+ parfordo(work.markfor)
+
+ lock(&allglock)
+ // Check that gc work is done.
+ for i := uintptr(0); i < local_allglen; i++ {
+ gp := allgs[i]
+ if !gp.gcworkdone {
+ gothrow("scan missed a g")
+ }
+ }
+ unlock(&allglock)
+
+ gcphase = oldphase
+ casgstatus(mastergp, _Gwaiting, _Grunning)
+ // Let the g that called us continue to run.
+}
+
+// Mark all objects that are known about.
+func gcmark_m() {
+ scanblock(0, 0, nil)
+}
+
+// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
+// that all goroutines see the new barrier.
+func gcinstallmarkwb_m() {
+ gcphase = _GCmark
+}
+
+// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
+// that all goroutines see the new barrier.
+func gcinstalloffwb_m() {
+ gcphase = _GCoff
+}
+
+func gc(start_time int64, eagersweep bool) {
+ if _DebugGCPtrs {
+ print("GC start\n")
+ }
+
+ if debug.allocfreetrace > 0 {
+ tracegc()
+ }
+
+ _g_ := getg()
+ _g_.m.traceback = 2
+ t0 := start_time
+ work.tstart = start_time
+
+ var t1 int64
+ if debug.gctrace > 0 {
+ t1 = nanotime()
+ }
+
+ if !checkmark {
+ finishsweep_m() // skip during checkmark debug phase.
+ }
+
+ // Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
+ // resizing/freeing allspans.
+ // New spans can be created while GC progresses, but they are not garbage for
+ // this round:
+ // - new stack spans can be created even while the world is stopped.
+ // - new malloc spans can be created during the concurrent sweep
+
+ // Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
+ lock(&mheap_.lock)
+ // Free the old cached sweep array if necessary.
+ if work.spans != nil && &work.spans[0] != &h_allspans[0] {
+ sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
+ }
+ // Cache the current array for marking.
+ mheap_.gcspans = mheap_.allspans
+ work.spans = h_allspans
+ unlock(&mheap_.lock)
+ oldphase := gcphase
+
+ work.nwait = 0
+ work.ndone = 0
+ work.nproc = uint32(gcprocs())
+ gcphase = _GCmarktermination
+
+ // World is stopped so allglen will not change.
+ for i := uintptr(0); i < allglen; i++ {
+ gp := allgs[i]
+ gp.gcworkdone = false // set to true in gcphasework
+ }
+
+ parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), nil, false, markroot)
+ if work.nproc > 1 {
+ noteclear(&work.alldone)
+ helpgc(int32(work.nproc))
+ }
+
+ var t2 int64
+ if debug.gctrace > 0 {
+ t2 = nanotime()
+ }
+
+ gchelperstart()
+ parfordo(work.markfor)
+ scanblock(0, 0, nil)
+
+ if work.full != 0 {
+ gothrow("work.full != 0")
+ }
+ if work.partial != 0 {
+ gothrow("work.partial != 0")
+ }
+
+ gcphase = oldphase
+ var t3 int64
+ if debug.gctrace > 0 {
+ t3 = nanotime()
+ }
+
+ if work.nproc > 1 {
+ notesleep(&work.alldone)
+ }
+
+ shrinkfinish()
+
+ cachestats()
+ // The next_gc calculation is tricky with concurrent sweep since we don't know the size of the live heap.
+ // Estimate what the live heap size was after the previous GC (for printing only).
+ heap0 := memstats.next_gc * 100 / (uint64(gcpercent) + 100)
+ // Conservatively set next_gc to a high value assuming that everything is live;
+ // the concurrent/lazy sweep will reduce this number while discovering new garbage.
+ memstats.next_gc = memstats.heap_alloc + memstats.heap_alloc*uint64(gcpercent)/100
+
+ t4 := nanotime()
+ atomicstore64(&memstats.last_gc, uint64(unixnanotime())) // must be Unix time to make sense to user
+ memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(t4 - t0)
+ memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(t4)
+ memstats.pause_total_ns += uint64(t4 - t0)
+ memstats.numgc++
+ if memstats.debuggc {
+ print("pause ", t4-t0, "\n")
+ }
+
+ if debug.gctrace > 0 {
+ heap1 := memstats.heap_alloc
+ var stats gcstats
+ updatememstats(&stats)
+ if heap1 != memstats.heap_alloc {
+ print("runtime: mstats skew: heap=", heap1, "/", memstats.heap_alloc, "\n")
+ gothrow("mstats skew")
+ }
+ obj := memstats.nmalloc - memstats.nfree
+
+ stats.nprocyield += work.markfor.nprocyield
+ stats.nosyield += work.markfor.nosyield
+ stats.nsleep += work.markfor.nsleep
+
+ print("gc", memstats.numgc, "(", work.nproc, "): ",
+ (t1-t0)/1000, "+", (t2-t1)/1000, "+", (t3-t2)/1000, "+", (t4-t3)/1000, " us, ",
+ heap0>>20, " -> ", heap1>>20, " MB, ",
+ obj, " (", memstats.nmalloc, "-", memstats.nfree, ") objects, ",
+ gcount(), " goroutines, ",
+ len(work.spans), "/", sweep.nbgsweep, "/", sweep.npausesweep, " sweeps, ",
+ stats.nhandoff, "(", stats.nhandoffcnt, ") handoff, ",
+ work.markfor.nsteal, "(", work.markfor.nstealcnt, ") steal, ",
+ stats.nprocyield, "/", stats.nosyield, "/", stats.nsleep, " yields\n")
+ sweep.nbgsweep = 0
+ sweep.npausesweep = 0
+ }
+
+ // See the comment at the beginning of this function as to why we need the following.
+ // Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
+ lock(&mheap_.lock)
+ // Free the old cached mark array if necessary.
+ if work.spans != nil && &work.spans[0] != &h_allspans[0] {
+ sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
+ }
+
+ if gccheckmarkenable {
+ if !checkmark {
+ // first half of two-pass; don't set up sweep
+ unlock(&mheap_.lock)
+ return
+ }
+ checkmark = false // done checking marks
+ }
+
+ // Cache the current array for sweeping.
+ mheap_.gcspans = mheap_.allspans
+ mheap_.sweepgen += 2
+ mheap_.sweepdone = 0
+ work.spans = h_allspans
+ sweep.spanidx = 0
+ unlock(&mheap_.lock)
+
+ if _ConcurrentSweep && !eagersweep {
+ lock(&gclock)
+ if !sweep.started {
+ go bgsweep()
+ sweep.started = true
+ } else if sweep.parked {
+ sweep.parked = false
+ ready(sweep.g)
+ }
+ unlock(&gclock)
+ } else {
+ // Sweep all spans eagerly.
+ for sweepone() != ^uintptr(0) {
+ sweep.npausesweep++
+ }
+ // Do an additional mProf_GC, because all 'free' events are now real as well.
+ mProf_GC()
+ }
+
+ mProf_GC()
+ _g_.m.traceback = 0
+
+ if _DebugGCPtrs {
+ print("GC end\n")
+ }
+}
+
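+// The pacing arithmetic used in gc above is next_gc = heap_alloc*(1+GOGC/100),
+// and the printed heap0 inverts it as heap0 = next_gc*100/(GOGC+100). A tiny
+// worked sketch with invented numbers (GOGC=100 and 4 MB live means the next
+// GC triggers at 8 MB); nextGCDemo is not a runtime function.
+func nextGCDemo() (nextGC, heap0 uint64) {
+ const gcpercent uint64 = 100     // GOGC
+ const heapAlloc uint64 = 4 << 20 // 4 MB considered live after this GC
+ nextGC = heapAlloc + heapAlloc*gcpercent/100 // 8 MB trigger point
+ heap0 = nextGC * 100 / (gcpercent + 100)     // recovers the 4 MB estimate
+ return
+}
+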
+func readmemstats_m(stats *MemStats) {
+ updatememstats(nil)
+
+ // The size of the trailing by_size array differs between Go and C:
+ // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
+ memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)
+
+ // Stack numbers are part of the heap numbers; separate those out for user consumption.
+ stats.StackSys = stats.StackInuse
+ stats.HeapInuse -= stats.StackInuse
+ stats.HeapSys -= stats.StackInuse
+}
+
+//go:linkname readGCStats runtime/debug.readGCStats
+func readGCStats(pauses *[]uint64) {
+ systemstack(func() {
+ readGCStats_m(pauses)
+ })
+}
+
+func readGCStats_m(pauses *[]uint64) {
+ p := *pauses
+ // Calling code in runtime/debug should make the slice large enough.
+ if cap(p) < len(memstats.pause_ns)+3 {
+ gothrow("runtime: short slice passed to readGCStats")
+ }
+
+ // Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
+ lock(&mheap_.lock)
+
+ n := memstats.numgc
+ if n > uint32(len(memstats.pause_ns)) {
+ n = uint32(len(memstats.pause_ns))
+ }
+
+ // The pause buffer is circular. The most recent pause is at
+ // pause_ns[(numgc-1)%len(pause_ns)]; walking backward from there
+ // goes farther back in time. We deliver the times
+ // most recent first (in p[0]).
+ p = p[:cap(p)]
+ for i := uint32(0); i < n; i++ {
+ j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
+ p[i] = memstats.pause_ns[j]
+ p[n+i] = memstats.pause_end[j]
+ }
+
+ p[n+n] = memstats.last_gc
+ p[n+n+1] = uint64(memstats.numgc)
+ p[n+n+2] = memstats.pause_total_ns
+ unlock(&mheap_.lock)
+ *pauses = p[:n+n+3]
+}
+
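+// The pause history is a fixed-size ring indexed by GC count: the i-th most
+// recent pause lives at (numgc-1-i) mod len(buf). An illustrative sketch over
+// a plain slice; recentPauses is an invented helper, not a runtime API.
+func recentPauses(buf []uint64, numgc uint32, n int) []uint64 {
+ if uint32(n) > numgc {
+ n = int(numgc)
+ }
+ if n > len(buf) {
+ n = len(buf)
+ }
+ out := make([]uint64, 0, n)
+ for i := 0; i < n; i++ {
+ j := (numgc - 1 - uint32(i)) % uint32(len(buf))
+ out = append(out, buf[j]) // out[0] is the most recent pause
+ }
+ return out
+}
+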
+func setGCPercent(in int32) (out int32) {
+ lock(&mheap_.lock)
+ out = gcpercent
+ if in < 0 {
+ in = -1
+ }
+ gcpercent = in
+ unlock(&mheap_.lock)
+ return out
+}
+
+func gchelperstart() {
+ _g_ := getg()
+
+ if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
+ gothrow("gchelperstart: bad m->helpgc")
+ }
+ if _g_ != _g_.m.g0 {
+ gothrow("gchelper not running on g0 stack")
+ }
+}
+
+func wakefing() *g {
+ var res *g
+ lock(&finlock)
+ if fingwait && fingwake {
+ fingwait = false
+ fingwake = false
+ res = fing
+ }
+ unlock(&finlock)
+ return res
+}
+
+func addb(p *byte, n uintptr) *byte {
+ return (*byte)(add(unsafe.Pointer(p), n))
+}
+
+// Recursively unrolls the GC program in prog.
+// mask is where to store the result.
+// ppos is a pointer to the position in mask, in bits.
+// sparse says to generate a 4-bit-per-word mask for the heap (2 bits per word for data/bss otherwise).
+func unrollgcprog1(maskp *byte, prog *byte, ppos *uintptr, inplace, sparse bool) *byte {
+ arena_start := mheap_.arena_start
+ pos := *ppos
+ mask := (*[1 << 30]byte)(unsafe.Pointer(maskp))
+ for {
+ switch *prog {
+ default:
+ gothrow("unrollgcprog: unknown instruction")
+
+ case insData:
+ prog = addb(prog, 1)
+ siz := int(*prog)
+ prog = addb(prog, 1)
+ p := (*[1 << 30]byte)(unsafe.Pointer(prog))
+ for i := 0; i < siz; i++ {
+ v := p[i/_PointersPerByte]
+ v >>= (uint(i) % _PointersPerByte) * _BitsPerPointer
+ v &= _BitsMask
+ if inplace {
+ // Store directly into GC bitmap.
+ off := (uintptr(unsafe.Pointer(&mask[pos])) - arena_start) / ptrSize
+ bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
+ shift := (off % wordsPerBitmapByte) * gcBits
+ if shift == 0 {
+ *bitp = 0
+ }
+ *bitp |= v << (shift + 2)
+ pos += ptrSize
+ } else if sparse {
+ // 4-bits per word
+ v <<= (pos % 8) + 2
+ mask[pos/8] |= v
+ pos += gcBits
+ } else {
+ // 2-bits per word
+ v <<= pos % 8
+ mask[pos/8] |= v
+ pos += _BitsPerPointer
+ }
+ }
+ prog = addb(prog, round(uintptr(siz)*_BitsPerPointer, 8)/8)
+
+ case insArray:
+ prog = (*byte)(add(unsafe.Pointer(prog), 1))
+ siz := uintptr(0)
+ for i := uintptr(0); i < ptrSize; i++ {
+ siz = (siz << 8) + uintptr(*(*byte)(add(unsafe.Pointer(prog), ptrSize-i-1)))
+ }
+ prog = (*byte)(add(unsafe.Pointer(prog), ptrSize))
+ var prog1 *byte
+ for i := uintptr(0); i < siz; i++ {
+ prog1 = unrollgcprog1(&mask[0], prog, &pos, inplace, sparse)
+ }
+ if *prog1 != insArrayEnd {
+ gothrow("unrollgcprog: array does not end with insArrayEnd")
+ }
+ prog = (*byte)(add(unsafe.Pointer(prog1), 1))
+
+ case insArrayEnd, insEnd:
+ *ppos = pos
+ return prog
+ }
+ }
+}
+
+// Unrolls GC program prog for data/bss, returns dense GC mask.
+func unrollglobgcprog(prog *byte, size uintptr) bitvector {
+ masksize := round(round(size, ptrSize)/ptrSize*bitsPerPointer, 8) / 8
+ mask := (*[1 << 30]byte)(persistentalloc(masksize+1, 0, &memstats.gc_sys))
+ mask[masksize] = 0xa1
+ pos := uintptr(0)
+ prog = unrollgcprog1(&mask[0], prog, &pos, false, false)
+ if pos != size/ptrSize*bitsPerPointer {
+ print("unrollglobgcprog: bad program size, got ", pos, ", expect ", size/ptrSize*bitsPerPointer, "\n")
+ gothrow("unrollglobgcprog: bad program size")
+ }
+ if *prog != insEnd {
+ gothrow("unrollglobgcprog: program does not end with insEnd")
+ }
+ if mask[masksize] != 0xa1 {
+ gothrow("unrollglobgcprog: overflow")
+ }
+ return bitvector{int32(masksize * 8), &mask[0]}
+}
+
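+// The dense data/bss mask packs four 2-bit type values into each byte, which
+// is exactly the pos/8 and pos%8 arithmetic in unrollgcprog1's non-sparse
+// branch. A minimal sketch; packDenseMask is an invented helper.
+func packDenseMask(typeBits []byte) []byte {
+ const bitsPerPointer = 2
+ mask := make([]byte, (len(typeBits)*bitsPerPointer+7)/8)
+ pos := 0
+ for _, v := range typeBits {
+ mask[pos/8] |= (v & 0x3) << uint(pos%8) // low bits first, as in the unroller
+ pos += bitsPerPointer
+ }
+ return mask
+}
+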
+func unrollgcproginplace_m(v unsafe.Pointer, typ *_type, size, size0 uintptr) {
+ pos := uintptr(0)
+ prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
+ for pos != size0 {
+ unrollgcprog1((*byte)(v), prog, &pos, true, true)
+ }
+
+ // Mark the first word as bitBoundary.
+ arena_start := mheap_.arena_start
+ off := (uintptr(v) - arena_start) / ptrSize
+ bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
+ shift := (off % wordsPerBitmapByte) * gcBits
+ *bitp |= bitBoundary << shift
+
+ // Mark word after last as BitsDead.
+ if size0 < size {
+ off := (uintptr(v) + size0 - arena_start) / ptrSize
+ bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
+ shift := (off % wordsPerBitmapByte) * gcBits
+ *bitp &= uint8(^(bitPtrMask << shift) | uintptr(bitsDead)<<(shift+2))
+ }
+}
+
+var unroll mutex
+
+// Unrolls GC program in typ.gc[1] into typ.gc[0]
+func unrollgcprog_m(typ *_type) {
+ lock(&unroll)
+ mask := (*byte)(unsafe.Pointer(uintptr(typ.gc[0])))
+ if *mask == 0 {
+ pos := uintptr(8) // skip the unroll flag
+ prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
+ prog = unrollgcprog1(mask, prog, &pos, false, true)
+ if *prog != insEnd {
+ gothrow("unrollgcprog: program does not end with insEnd")
+ }
+ if typ.size/ptrSize%2 != 0 {
+ // repeat the program
+ prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
+ unrollgcprog1(mask, prog, &pos, false, true)
+ }
+
+ // atomic way to say mask[0] = 1
+ atomicor8(mask, 1)
+ }
+ unlock(&unroll)
+}
+
+// markspan marks the span of memory at v as having n blocks of the given size.
+// If leftover is true, there is leftover space at the end of the span.
+func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
+ if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
+ gothrow("markspan: bad pointer")
+ }
+
+ // Find bits of the beginning of the span.
+ off := (uintptr(v) - uintptr(mheap_.arena_start)) / ptrSize
+ if off%wordsPerBitmapByte != 0 {
+ gothrow("markspan: unaligned length")
+ }
+ b := mheap_.arena_start - off/wordsPerBitmapByte - 1
+
+ // Okay to use non-atomic ops here, because we control
+ // the entire span, and each bitmap byte has bits for only
+ // one span, so no other goroutines are changing these bitmap words.
+
+ if size == ptrSize {
+ // Possible only on 64-bit systems (the minimal size class is 8 bytes).
+ // Set memory to 0x11.
+ if (bitBoundary|bitsDead)<<gcBits|bitBoundary|bitsDead != 0x11 {
+ gothrow("markspan: bad bits")
+ }
+ if n%(wordsPerBitmapByte*ptrSize) != 0 {
+ gothrow("markspan: unaligned length")
+ }
+ b = b - n/wordsPerBitmapByte + 1 // find first byte
+ if b%ptrSize != 0 {
+ gothrow("markspan: unaligned pointer")
+ }
+ for i := uintptr(0); i < n; i, b = i+wordsPerBitmapByte*ptrSize, b+ptrSize {
+ *(*uintptr)(unsafe.Pointer(b)) = uintptrMask & 0x1111111111111111 // bitBoundary | bitsDead, repeated
+ }
+ return
+ }
+
+ if leftover {
+ n++ // mark a boundary just past end of last block too
+ }
+ step := size / (ptrSize * wordsPerBitmapByte)
+ for i := uintptr(0); i < n; i, b = i+1, b-step {
+ *(*byte)(unsafe.Pointer(b)) = bitBoundary | bitsDead<<2
+ }
+}
+
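+// Each bitmap byte describes two heap words (wordsPerBitmapByte == 2) with
+// four bits per word: a boundary bit, a mark bit, and two type bits. A small
+// sketch of how the 0x11 pattern written above decomposes; the constants are
+// local stand-ins, not the runtime's declarations.
+func bitmapByteDemo() (lowBoundary, highBoundary bool, lowType, highType byte) {
+ const (
+ bitBoundary = 1 << 0 // word starts an object
+ bitMarked   = 1 << 1 // word's object is marked
+ typeShift   = 2      // type bits sit above the boundary/mark bits
+ gcBits      = 4      // bitmap bits per heap word
+ )
+ b := byte(0x11) // bitBoundary|bitsDead for both words, as markspan writes
+ lowBoundary = b&bitBoundary != 0
+ highBoundary = (b>>gcBits)&bitBoundary != 0
+ lowType = (b >> typeShift) & 0x3 // 0 == BitsDead
+ highType = (b >> (gcBits + typeShift)) & 0x3
+ return
+}
+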
+// unmarkspan unmarks the span of memory at v of length n bytes.
+func unmarkspan(v, n uintptr) {
+ if v+n > mheap_.arena_used || v < mheap_.arena_start {
+ gothrow("markspan: bad pointer")
+ }
+
+ off := (v - mheap_.arena_start) / ptrSize // word offset
+ if off%(ptrSize*wordsPerBitmapByte) != 0 {
+ gothrow("markspan: unaligned pointer")
+ }
+
+ b := mheap_.arena_start - off/wordsPerBitmapByte - 1
+ n /= ptrSize
+ if n%(ptrSize*wordsPerBitmapByte) != 0 {
+ gothrow("unmarkspan: unaligned length")
+ }
+
+ // Okay to use non-atomic ops here, because we control
+ // the entire span, and each bitmap word has bits for only
+ // one span, so no other goroutines are changing these
+ // bitmap words.
+ n /= wordsPerBitmapByte
+ memclr(unsafe.Pointer(b-n+1), n)
+}
+
+func mHeap_MapBits(h *mheap) {
+ // Caller has added extra mappings to the arena.
+ // Add extra mappings of bitmap words as needed.
+ // We allocate extra bitmap pieces in chunks of bitmapChunk.
+ const bitmapChunk = 8192
+
+ n := (h.arena_used - h.arena_start) / (ptrSize * wordsPerBitmapByte)
+ n = round(n, bitmapChunk)
+ n = round(n, _PhysPageSize)
+ if h.bitmap_mapped >= n {
+ return
+ }
+
+ sysMap(unsafe.Pointer(h.arena_start-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
+ h.bitmap_mapped = n
+}
+
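+// mHeap_MapBits grows the bitmap mapping in fixed-size chunks, rounding the
+// required size up to the chunk size and then to the physical page size. For
+// power-of-two alignments, rounding up is a single mask operation. A minimal
+// sketch; roundUp and bitmapGrowthDemo are invented helpers, and 4096 is an
+// assumed page size rather than the runtime's _PhysPageSize.
+func roundUp(n, align uintptr) uintptr {
+ return (n + align - 1) &^ (align - 1) // align must be a power of two
+}
+
+func bitmapGrowthDemo(needed uintptr) uintptr {
+ const bitmapChunk = 8192
+ const physPageSize = 4096
+ return roundUp(roundUp(needed, bitmapChunk), physPageSize)
+}
+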
+func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
+ target := (*stkframe)(ctxt)
+ if frame.sp <= target.sp && target.sp < frame.varp {
+ *target = *frame
+ return false
+ }
+ return true
+}
+
+// Returns GC type info for object p for testing.
+func getgcmask(p unsafe.Pointer, t *_type, mask **byte, len *uintptr) {
+ *mask = nil
+ *len = 0
+
+ // data
+ if uintptr(unsafe.Pointer(&data)) <= uintptr(p) && uintptr(p) < uintptr(unsafe.Pointer(&edata)) {
+ n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+ *len = n / ptrSize
+ *mask = &make([]byte, *len)[0]
+ for i := uintptr(0); i < n; i += ptrSize {
+ off := (uintptr(p) + i - uintptr(unsafe.Pointer(&data))) / ptrSize
+ bits := (*(*byte)(add(unsafe.Pointer(gcdatamask.bytedata), off/pointersPerByte)) >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
+ *(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
+ }
+ return
+ }
+
+ // bss
+ if uintptr(unsafe.Pointer(&bss)) <= uintptr(p) && uintptr(p) < uintptr(unsafe.Pointer(&ebss)) {
+ n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+ *len = n / ptrSize
+ *mask = &make([]byte, *len)[0]
+ for i := uintptr(0); i < n; i += ptrSize {
+ off := (uintptr(p) + i - uintptr(unsafe.Pointer(&bss))) / ptrSize
+ bits := (*(*byte)(add(unsafe.Pointer(gcbssmask.bytedata), off/pointersPerByte)) >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
+ *(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
+ }
+ return
+ }
+
+ // heap
+ var n uintptr
+ var base uintptr
+ if mlookup(uintptr(p), &base, &n, nil) != 0 {
+ *len = n / ptrSize
+ *mask = &make([]byte, *len)[0]
+ for i := uintptr(0); i < n; i += ptrSize {
+ off := (uintptr(base) + i - mheap_.arena_start) / ptrSize
+ b := mheap_.arena_start - off/wordsPerBitmapByte - 1
+ shift := (off % wordsPerBitmapByte) * gcBits
+ bits := (*(*byte)(unsafe.Pointer(b)) >> (shift + 2)) & bitsMask
+ *(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
+ }
+ return
+ }
+
+ // stack
+ var frame stkframe
+ frame.sp = uintptr(p)
+ _g_ := getg()
+ gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
+ if frame.fn != nil {
+ f := frame.fn
+ targetpc := frame.continpc
+ if targetpc == 0 {
+ return
+ }
+ if targetpc != f.entry {
+ targetpc--
+ }
+ pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
+ if pcdata == -1 {
+ return
+ }
+ stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+ if stkmap == nil || stkmap.n <= 0 {
+ return
+ }
+ bv := stackmapdata(stkmap, pcdata)
+ size := uintptr(bv.n) / bitsPerPointer * ptrSize
+ n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+ *len = n / ptrSize
+ *mask = &make([]byte, *len)[0]
+ for i := uintptr(0); i < n; i += ptrSize {
+ off := (uintptr(p) + i - frame.varp + size) / ptrSize
+ bits := ((*(*byte)(add(unsafe.Pointer(bv.bytedata), off*bitsPerPointer/8))) >> ((off * bitsPerPointer) % 8)) & bitsMask
+ *(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
+ }
+ }
+}
+
+func unixnanotime() int64 {
+ var now int64
+ gc_unixnanotime(&now)
+ return now
+}
diff --git a/src/runtime/mgc0.c b/src/runtime/mgc0.c
deleted file mode 100644
index f37c01af0..000000000
--- a/src/runtime/mgc0.c
+++ /dev/null
@@ -1,2682 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector (GC).
-//
-// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
-// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
-// non-generational and non-compacting. Allocation is done using size segregated per P allocation
-// areas to minimize fragmentation while eliminating locks in the common case.
-//
-// The algorithm decomposes into several steps.
-// This is a high level description of the algorithm being used. For an overview of GC a good
-// place to start is Richard Jones' gchandbook.org.
-//
-// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
-// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
-// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), 966-975.
-// For journal quality proofs that these steps are complete, correct, and terminate see
-// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
-// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
-//
-// 0. Set phase = GCscan from GCoff.
-// 1. Wait for all P's to acknowledge phase change.
-// At this point all goroutines have passed through a GC safepoint and
-// know we are in the GCscan phase.
-// 2. GC scans all goroutine stacks, mark and enqueues all encountered pointers
-// (marking avoids most duplicate enqueuing but races may produce duplication which is benign).
-// Preempted goroutines are scanned before P schedules next goroutine.
-// 3. Set phase = GCmark.
-// 4. Wait for all P's to acknowledge phase change.
-// 5. Now write barrier marks and enqueues black, grey, or white to white pointers.
-// Malloc still allocates white (non-marked) objects.
-// 6. Meanwhile GC transitively walks the heap marking reachable objects.
-// 7. When GC finishes marking heap, it preempts P's one-by-one and
-// retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
-// currently scheduled on the P).
-// 8. Once the GC has exhausted all available marking work it sets phase = marktermination.
-// 9. Wait for all P's to acknowledge phase change.
-// 10. Malloc now allocates black objects, so number of unmarked reachable objects
-// monotonically decreases.
-// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet reachable objects.
-// 12. When GC completes a full cycle over P's and discovers no new grey
-// objects, (which means all reachable objects are marked) set phase = GCsweep.
-// 13. Wait for all P's to acknowledge phase change.
-// 14. Now malloc allocates white (but sweeps spans before use).
-// Write barrier becomes nop.
-// 15. GC does background sweeping, see description below.
-// 16. When sweeping is complete set phase to GCoff.
-// 17. When sufficient allocation has taken place replay the sequence starting at 0 above,
-// see discussion of GC rate below.
-
-// Changing phases.
-// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
-// All phase action must be benign in the presence of a change.
-// Starting with GCoff
-// GCoff to GCscan
-// GCscan scans stacks and globals, greying them, and never marks an object black.
-// Once all the P's are aware of the new phase they will scan gs on preemption.
-// This means that the scanning of preempted gs can't start until all the Ps
-// have acknowledged.
-// GCscan to GCmark
-// GCMark turns on the write barrier which also only greys objects. No scanning
-// of objects (making them black) can happen until all the Ps have acknowledged
-// the phase change.
-// GCmark to GCmarktermination
-// The only change here is that we start allocating black so the Ps must acknowledge
-// the change before we begin the termination algorithm
-// GCmarktermination to GCsweep
-// Objects currently on the freelist must be marked black for this to work.
-// Are things on the free lists black or white? How does the sweep phase work?
-
-// Concurrent sweep.
-// The sweep phase proceeds concurrently with normal program execution.
-// The heap is swept span-by-span both lazily (when a goroutine needs another span)
-// and concurrently in a background goroutine (this helps programs that are not CPU bound).
-// However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
-// and so next_gc calculation is tricky and happens as follows.
-// At the end of the stop-the-world phase next_gc is conservatively set based on total
-// heap size; all spans are marked as "needs sweeping".
-// Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
-// The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
-// closer to the target value. However, this is not enough to avoid over-allocating memory.
-// Consider that a goroutine wants to allocate a new span for a large object and
-// there are no free swept spans, but there are small-object unswept spans.
-// If the goroutine naively allocates a new span, it can surpass the yet-unknown
-// target next_gc value. In order to prevent such cases (1) when a goroutine needs
-// to allocate a new small-object span, it sweeps small-object spans for the same
-// object size until it frees at least one object; (2) when a goroutine needs to
-// allocate large-object span from heap, it sweeps spans until it frees at least
-// that many pages into heap. Together these two measures ensure that we don't surpass
-// target next_gc value by a large margin. There is an exception: if a goroutine sweeps
-// and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
-// but there can still be other one-page unswept spans which could be combined into a two-page span.
-// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
-// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
-// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
-// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
-// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
-// The finalizer goroutine is kicked off only when all spans are swept.
-// When the next GC starts, it sweeps all not-yet-swept spans (if any).
-
-// GC rate.
-// Next GC is after we've allocated an extra amount of memory proportional to
-// the amount already in use. The proportion is controlled by GOGC environment variable
-// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
-// (this mark is tracked in next_gc variable). This keeps the GC cost in linear
-// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
-// (and also the amount of extra memory used).
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "stack.h"
-#include "mgc0.h"
-#include "chan.h"
-#include "race.h"
-#include "type.h"
-#include "typekind.h"
-#include "funcdata.h"
-#include "textflag.h"
-
-enum {
- Debug = 0,
- DebugPtrs = 0, // if 1, print trace of every pointer load during GC
- ConcurrentSweep = 1,
-
- FinBlockSize = 4*1024,
- RootData = 0,
- RootBss = 1,
- RootFinalizers = 2,
- RootSpans = 3,
- RootFlushCaches = 4,
- RootCount = 5,
-};
-
-// ptrmask for an allocation containing a single pointer.
-static byte oneptr[] = {BitsPointer};
-
-// Initialized from $GOGC. GOGC=off means no GC.
-extern int32 runtime·gcpercent;
-
-// Holding worldsema grants an M the right to try to stop the world.
-// The procedure is:
-//
-// runtime·semacquire(&runtime·worldsema);
-// m->gcing = 1;
-// runtime·stoptheworld();
-//
-// ... do stuff ...
-//
-// m->gcing = 0;
-// runtime·semrelease(&runtime·worldsema);
-// runtime·starttheworld();
-//
-uint32 runtime·worldsema = 1;
-
-// It is a bug if bits does not have bitBoundary set but
-// there are still some cases where this happens related
-// to stack spans.
-typedef struct Markbits Markbits;
-struct Markbits {
- byte *bitp; // pointer to the byte holding xbits
- byte shift; // bits xbits needs to be shifted to get bits
- byte xbits; // byte holding all the bits from *bitp
- byte bits; // mark and boundary bits relevant to corresponding slot.
- byte tbits; // pointer||scalar bits relevant to corresponding slot.
-};
-
-extern byte runtime·data[];
-extern byte runtime·edata[];
-extern byte runtime·bss[];
-extern byte runtime·ebss[];
-
-extern byte runtime·gcdata[];
-extern byte runtime·gcbss[];
-
-Mutex runtime·finlock; // protects the following variables
-G* runtime·fing; // goroutine that runs finalizers
-FinBlock* runtime·finq; // list of finalizers that are to be executed
-FinBlock* runtime·finc; // cache of free blocks
-static byte finptrmask[FinBlockSize/PtrSize/PointersPerByte];
-bool runtime·fingwait;
-bool runtime·fingwake;
-FinBlock *runtime·allfin; // list of all blocks
-
-BitVector runtime·gcdatamask;
-BitVector runtime·gcbssmask;
-
-Mutex runtime·gclock;
-
-static Workbuf* getpartialorempty(void);
-static void putpartial(Workbuf*);
-static Workbuf* getempty(Workbuf*);
-static Workbuf* getfull(Workbuf*);
-static void putempty(Workbuf*);
-static void putfull(Workbuf*);
-static Workbuf* handoff(Workbuf*);
-static void gchelperstart(void);
-static void flushallmcaches(void);
-static bool scanframe(Stkframe*, void*);
-static void scanstack(G*);
-static BitVector unrollglobgcprog(byte*, uintptr);
-static void scanblock(byte*, uintptr, byte*);
-static byte* objectstart(byte*, Markbits*);
-static Workbuf* greyobject(byte*, Markbits*, Workbuf*);
-static bool inheap(byte*);
-static bool shaded(byte*);
-static void shade(byte*);
-static void slottombits(byte*, Markbits*);
-static void atomicxor8(byte*, byte);
-static bool ischeckmarked(Markbits*);
-static bool ismarked(Markbits*);
-static void clearcheckmarkbits(void);
-static void clearcheckmarkbitsspan(MSpan*);
-
-void runtime·bgsweep(void);
-void runtime·finishsweep_m(void);
-static FuncVal bgsweepv = {runtime·bgsweep};
-
-typedef struct WorkData WorkData;
-struct WorkData {
- uint64 full; // lock-free list of full blocks
- uint64 empty; // lock-free list of empty blocks
- uint64 partial; // lock-free list of partially filled blocks
- byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
- uint32 nproc;
- int64 tstart;
- volatile uint32 nwait;
- volatile uint32 ndone;
- Note alldone;
- ParFor* markfor;
-
- // Copy of mheap.allspans for marker or sweeper.
- MSpan** spans;
- uint32 nspan;
-};
-WorkData runtime·work;
-
-// To help debug the concurrent GC we remark with the world
-// stopped, ensuring that any object encountered has its normal
-// mark bit set. To do this we use an orthogonal bit
-// pattern to indicate the object is marked. The following pattern
-// uses the upper two bits in the object's boundary nibble.
-// 01: scalar not marked
-// 10: pointer not marked
-// 11: pointer marked
-// 00: scalar marked
-// XORing with 01 will flip the pattern from marked to unmarked and vice versa.
-// The higher bit is 1 for pointers and 0 for scalars, whether the object
-// is marked or not.
-// The first nibble no longer holds the bitsDead pattern indicating that
-// there are no more pointers in the object. This information is held
-// in the second nibble.
-
-// When marking an object if the bool checkmark is true one uses the above
-// encoding, otherwise one uses the bitMarked bit in the lower two bits
-// of the nibble.
-static bool checkmark = false;
-static bool gccheckmarkenable = true;
-
-// Is address b in the known heap? If it doesn't have a valid gcmap,
-// this returns false. For example, pointers into stacks will return false.
-static bool
-inheap(byte *b)
-{
- MSpan *s;
- pageID k;
- uintptr x;
-
- if(b == nil || b < runtime·mheap.arena_start || b >= runtime·mheap.arena_used)
- return false;
- // Not a beginning of a block, consult span table to find the block beginning.
- k = (uintptr)b>>PageShift;
- x = k;
- x -= (uintptr)runtime·mheap.arena_start>>PageShift;
- s = runtime·mheap.spans[x];
- if(s == nil || k < s->start || b >= s->limit || s->state != MSpanInUse)
- return false;
- return true;
-}
-
-// Given an address in the heap, return the relevant byte from the gcmap. This routine
-// can be used on addresses to the start of an object or to the interior of an object.
-static void
-slottombits(byte *obj, Markbits *mbits)
-{
- uintptr off;
-
- off = (uintptr*)((uintptr)obj&~(PtrSize-1)) - (uintptr*)runtime·mheap.arena_start;
- mbits->bitp = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
- mbits->shift = (off % wordsPerBitmapByte) * gcBits;
- mbits->xbits = *mbits->bitp;
- mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
- mbits->tbits = ((mbits->xbits >> mbits->shift) & bitPtrMask) >> 2;
-}
-
-// b is a pointer into the heap.
-// Find the start of the object referred to by b.
-// Set mbits to the associated bits from the bit map.
-// If b is not a valid heap object return nil and
-// undefined values in mbits.
-static byte*
-objectstart(byte *b, Markbits *mbits)
-{
- byte *obj, *p;
- MSpan *s;
- pageID k;
- uintptr x, size, idx;
-
- obj = (byte*)((uintptr)b&~(PtrSize-1));
- for(;;) {
- slottombits(obj, mbits);
- if((mbits->bits&bitBoundary) == bitBoundary)
- break;
-
- // Not a beginning of a block, consult span table to find the block beginning.
- k = (uintptr)obj>>PageShift;
- x = k;
- x -= (uintptr)runtime·mheap.arena_start>>PageShift;
- s = runtime·mheap.spans[x];
- if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse){
- if(s != nil && s->state == MSpanStack) {
- return nil; // This is legit.
- }
-
- // The following ensures that we are rigorous about what data
- // structures hold valid pointers
- if(0) {
- // Still happens sometimes. We don't know why.
- runtime·printf("runtime:objectstart Span weird: obj=%p, k=%p", obj, k);
- if (s == nil)
- runtime·printf(" s=nil\n");
- else
- runtime·printf(" s->start=%p s->limit=%p, s->state=%d\n", s->start*PageSize, s->limit, s->state);
- runtime·throw("objectstart: bad pointer in unexpected span");
- }
- return nil;
- }
- p = (byte*)((uintptr)s->start<<PageShift);
- if(s->sizeclass != 0) {
- size = s->elemsize;
- idx = ((byte*)obj - p)/size;
- p = p+idx*size;
- }
- if(p == obj) {
- runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
- p, s->start*PageSize, s->limit);
- runtime·throw("failed to find block beginning");
- }
- obj = p;
- }
- // if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit
- // Clear any low bits to get to the start of the object.
- // greyobject depends on this.
- return obj;
-}
-
-// Slow for now as we serialize this; since this is on a debug path,
-// speed is not critical at this point.
-static Mutex andlock;
-static void
-atomicand8(byte *src, byte val)
-{
- runtime·lock(&andlock);
- *src = *src&val;
- runtime·unlock(&andlock);
-}
-
-// Mark using the checkmark scheme.
-void
-docheckmark(Markbits *mbits)
-{
- // xor 01 moves 01(scalar unmarked) to 00(scalar marked)
- // and 10(pointer unmarked) to 11(pointer marked)
- if(mbits->tbits == BitsScalar)
- atomicand8(mbits->bitp, ~(byte)(BitsCheckMarkXor<<mbits->shift<<2));
- else if(mbits->tbits == BitsPointer)
- runtime·atomicor8(mbits->bitp, BitsCheckMarkXor<<mbits->shift<<2);
-
- // reload bits for ischeckmarked
- mbits->xbits = *mbits->bitp;
- mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
- mbits->tbits = ((mbits->xbits >> mbits->shift) & bitPtrMask) >> 2;
-
- return;
-}
-
-// In the default scheme does mbits refer to a marked object.
-static bool
-ismarked(Markbits *mbits)
-{
- if((mbits->bits&bitBoundary) != bitBoundary)
- runtime·throw("ismarked: bits should have boundary bit set");
- return (mbits->bits&bitMarked) == bitMarked;
-}
-
-// In the checkmark scheme does mbits refer to a marked object.
-static bool
-ischeckmarked(Markbits *mbits)
-{
- if((mbits->bits&bitBoundary) != bitBoundary)
- runtime·printf("runtime:ischeckmarked: bits should have boundary bit set\n");
- return mbits->tbits==BitsScalarMarked || mbits->tbits==BitsPointerMarked;
-}
-
-// When in the GCmarktermination phase we allocate black.
-void
-runtime·gcmarknewobject_m(void)
-{
- Markbits mbits;
- byte *obj;
-
- if(runtime·gcphase != GCmarktermination)
- runtime·throw("marking new object while not in mark termination phase");
- if(checkmark) // The world should be stopped so this should not happen.
- runtime·throw("gcmarknewobject called while doing checkmark");
-
- obj = g->m->ptrarg[0];
- slottombits((byte*)((uintptr)obj & (PtrSize-1)), &mbits);
-
- if((mbits.bits&bitMarked) != 0)
- return;
-
- // Each byte of GC bitmap holds info for two words.
- // If the current object is larger than two words, or if the object is one word
- // but the object it shares the byte with is already marked,
- // then all the possible concurrent updates are trying to set the same bit,
- // so we can use a non-atomic update.
- if((mbits.xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
- *mbits.bitp = mbits.xbits | (bitMarked<<mbits.shift);
- else
- runtime·atomicor8(mbits.bitp, bitMarked<<mbits.shift);
- return;
-}
-
-// obj is the start of an object with mark mbits.
-// If it isn't already marked, mark it and enqueue into workbuf.
-// Return possibly new workbuf to use.
-static Workbuf*
-greyobject(byte *obj, Markbits *mbits, Workbuf *wbuf)
-{
- // obj should be start of allocation, and so must be at least pointer-aligned.
- if(((uintptr)obj & (PtrSize-1)) != 0)
- runtime·throw("greyobject: obj not pointer-aligned");
-
- if(checkmark) {
- if(!ismarked(mbits)) {
- MSpan *s;
- pageID k;
- uintptr x, i;
-
- runtime·printf("runtime:greyobject: checkmarks finds unexpected unmarked object obj=%p, mbits->bits=%x, *mbits->bitp=%x\n", obj, mbits->bits, *mbits->bitp);
-
- k = (uintptr)obj>>PageShift;
- x = k;
- x -= (uintptr)runtime·mheap.arena_start>>PageShift;
- s = runtime·mheap.spans[x];
- runtime·printf("runtime:greyobject Span: obj=%p, k=%p", obj, k);
- if (s == nil) {
- runtime·printf(" s=nil\n");
- } else {
- runtime·printf(" s->start=%p s->limit=%p, s->state=%d, s->sizeclass=%d, s->elemsize=%D \n", s->start*PageSize, s->limit, s->state, s->sizeclass, s->elemsize);
- for(i=0; i<s->sizeclass; i++) {
- runtime·printf(" ((uintptr*)obj)[%D]=%p\n", i, ((uintptr*)obj)[i]);
- }
- }
- runtime·throw("checkmark found unmarked object");
- }
- if(ischeckmarked(mbits))
- return wbuf;
- docheckmark(mbits);
- if(!ischeckmarked(mbits)) {
- runtime·printf("mbits xbits=%x bits=%x tbits=%x shift=%d\n", mbits->xbits, mbits->bits, mbits->tbits, mbits->shift);
- runtime·throw("docheckmark and ischeckmarked disagree");
- }
- } else {
- // If marked we have nothing to do.
- if((mbits->bits&bitMarked) != 0)
- return wbuf;
-
- // Each byte of GC bitmap holds info for two words.
- // If the current object is larger than two words, or if the object is one word
- // but the object it shares the byte with is already marked,
- // then all the possible concurrent updates are trying to set the same bit,
- // so we can use a non-atomic update.
- if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
- *mbits->bitp = mbits->xbits | (bitMarked<<mbits->shift);
- else
- runtime·atomicor8(mbits->bitp, bitMarked<<mbits->shift);
- }
-
- if (!checkmark && (((mbits->xbits>>(mbits->shift+2))&BitsMask) == BitsDead))
- return wbuf; // noscan object
-
- // Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
- // seems like a nice optimization that can be added back in.
- // There needs to be time between the PREFETCH and the use.
- // Previously we put the obj in an 8 element buffer that is drained at a rate
- // to give the PREFETCH time to do its work.
- // Use of PREFETCHNTA might be more appropriate than PREFETCH
-
- // If workbuf is full, obtain an empty one.
- if(wbuf->nobj >= nelem(wbuf->obj)) {
- wbuf = getempty(wbuf);
- }
-
- wbuf->obj[wbuf->nobj] = obj;
- wbuf->nobj++;
- return wbuf;
-}
-
-// Scan the object b of size n, adding pointers to wbuf.
-// Return possibly new wbuf to use.
-// If ptrmask != nil, it specifies where pointers are in b.
-// If ptrmask == nil, the GC bitmap should be consulted.
-// In this case, n may be an overestimate of the size; the GC bitmap
-// must also be used to make sure the scan stops at the end of b.
-static Workbuf*
-scanobject(byte *b, uintptr n, byte *ptrmask, Workbuf *wbuf)
-{
- byte *obj, *arena_start, *arena_used, *ptrbitp;
- uintptr i, j;
- int32 bits;
- Markbits mbits;
-
- arena_start = (byte*)runtime·mheap.arena_start;
- arena_used = runtime·mheap.arena_used;
- ptrbitp = nil;
-
- // Find bits of the beginning of the object.
- if(ptrmask == nil) {
- b = objectstart(b, &mbits);
- if(b == nil)
- return wbuf;
- ptrbitp = mbits.bitp; //arena_start - off/wordsPerBitmapByte - 1;
- }
- for(i = 0; i < n; i += PtrSize) {
- // Find bits for this word.
- if(ptrmask != nil) {
- // dense mask (stack or data)
- bits = (ptrmask[(i/PtrSize)/4]>>(((i/PtrSize)%4)*BitsPerPointer))&BitsMask;
- } else {
- // Check if we have reached end of span.
- // n is an overestimate of the size of the object.
- if((((uintptr)b+i)%PageSize) == 0 &&
- runtime·mheap.spans[(b-arena_start)>>PageShift] != runtime·mheap.spans[(b+i-arena_start)>>PageShift])
- break;
- // Consult GC bitmap.
- bits = *ptrbitp;
- if(wordsPerBitmapByte != 2)
- runtime·throw("alg doesn't work for wordsPerBitmapByte != 2");
- j = ((uintptr)b+i)/PtrSize & 1; // j indicates upper nibble or lower nibble
- bits >>= gcBits*j;
- if(i == 0)
- bits &= ~bitBoundary;
- ptrbitp -= j;
-
- if((bits&bitBoundary) != 0 && i != 0)
- break; // reached beginning of the next object
- bits = (bits&bitPtrMask)>>2; // bits refer to the type bits.
-
- if(i != 0 && bits == BitsDead) // BitsDead in first nibble not valid during checkmark
- break; // reached no-scan part of the object
- }
-
- if(bits <= BitsScalar) // Bits Scalar ||
- // BitsDead || // default encoding
- // BitsScalarMarked // checkmark encoding
- continue;
-
- if((bits&BitsPointer) != BitsPointer) {
- runtime·printf("gc checkmark=%d, b=%p ptrmask=%p, mbits.bitp=%p, mbits.xbits=%x, bits=%x\n", checkmark, b, ptrmask, mbits.bitp, mbits.xbits, bits);
- runtime·throw("unexpected garbage collection bits");
- }
-
- obj = *(byte**)(b+i);
- // At this point we have extracted the next potential pointer.
- // Check if it points into heap.
- if(obj == nil || obj < arena_start || obj >= arena_used)
- continue;
- // Mark the object and return some important bits.
- // If we combine the following two routines we don't have to pass mbits or obj around.
- obj = objectstart(obj, &mbits);
- // In the case of the span being MSpan_Stack mbits is useless and will not have
- // the boundary bit set. It does not need to be greyed since it will be
- // scanned using the scan stack mechanism.
- if(obj == nil)
- continue;
- wbuf = greyobject(obj, &mbits, wbuf);
- }
- return wbuf;
-}
-
-// scanblock starts by scanning b as scanobject would.
-// If the gcphase is GCscan, that's all scanblock does.
-// Otherwise it traverses some fraction of the pointers it found in b, recursively.
-// As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
-// stopping only when no work is left in the system.
-static void
-scanblock(byte *b, uintptr n, byte *ptrmask)
-{
- Workbuf *wbuf;
- bool keepworking;
-
- wbuf = getpartialorempty();
- if(b != nil) {
- wbuf = scanobject(b, n, ptrmask, wbuf);
- if(runtime·gcphase == GCscan) {
- if(inheap(b) && !ptrmask)
- // b is in heap, we are in GCscan so there should be a ptrmask.
- runtime·throw("scanblock: In GCscan phase and inheap is true.");
- // GCscan only goes one level deep since mark wb not turned on.
- putpartial(wbuf);
- return;
- }
- }
- if(runtime·gcphase == GCscan) {
- runtime·throw("scanblock: In GCscan phase but no b passed in.");
- }
-
- keepworking = b == nil;
-
- // ptrmask can have 2 possible values:
- // 1. nil - obtain pointer mask from GC bitmap.
- // 2. pointer to a compact mask (for stacks and data).
- for(;;) {
- if(wbuf->nobj == 0) {
- if(!keepworking) {
- putempty(wbuf);
- return;
- }
- // Refill workbuf from global queue.
- wbuf = getfull(wbuf);
- if(wbuf == nil) // nil means out of work barrier reached
- return;
-
- if(wbuf->nobj<=0) {
- runtime·throw("runtime:scanblock getfull returns empty buffer");
- }
-
- }
-
- // If another proc wants a pointer, give it some.
- if(runtime·work.nwait > 0 && wbuf->nobj > 4 && runtime·work.full == 0) {
- wbuf = handoff(wbuf);
- }
-
- // This might be a good place to add prefetch code...
- // if(wbuf->nobj > 4) {
- // PREFETCH(wbuf->obj[wbuf->nobj - 3];
- // }
- --wbuf->nobj;
- b = wbuf->obj[wbuf->nobj];
- wbuf = scanobject(b, runtime·mheap.arena_used - b, nil, wbuf);
- }
-}
-
-static void
-markroot(ParFor *desc, uint32 i)
-{
- FinBlock *fb;
- MSpan *s;
- uint32 spanidx, sg;
- G *gp;
- void *p;
- uint32 status;
- bool restart;
-
- USED(&desc);
- // Note: if you add a case here, please also update heapdump.c:dumproots.
- switch(i) {
- case RootData:
- scanblock(runtime·data, runtime·edata - runtime·data, runtime·gcdatamask.bytedata);
- break;
-
- case RootBss:
- scanblock(runtime·bss, runtime·ebss - runtime·bss, runtime·gcbssmask.bytedata);
- break;
-
- case RootFinalizers:
- for(fb=runtime·allfin; fb; fb=fb->alllink)
- scanblock((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), finptrmask);
- break;
-
- case RootSpans:
- // mark MSpan.specials
- sg = runtime·mheap.sweepgen;
- for(spanidx=0; spanidx<runtime·work.nspan; spanidx++) {
- Special *sp;
- SpecialFinalizer *spf;
-
- s = runtime·work.spans[spanidx];
- if(s->state != MSpanInUse)
- continue;
- if(!checkmark && s->sweepgen != sg) {
- // sweepgen was updated (+2) during non-checkmark GC pass
- runtime·printf("sweep %d %d\n", s->sweepgen, sg);
- runtime·throw("gc: unswept span");
- }
- for(sp = s->specials; sp != nil; sp = sp->next) {
- if(sp->kind != KindSpecialFinalizer)
- continue;
- // don't mark finalized object, but scan it so we
- // retain everything it points to.
- spf = (SpecialFinalizer*)sp;
- // A finalizer can be set for an inner byte of an object, find object beginning.
- p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
- if(runtime·gcphase != GCscan)
- scanblock(p, s->elemsize, nil); // Scanned during mark phase
- scanblock((void*)&spf->fn, PtrSize, oneptr);
- }
- }
- break;
-
- case RootFlushCaches:
- if (runtime·gcphase != GCscan) // Do not flush mcaches during GCscan phase.
- flushallmcaches();
- break;
-
- default:
- // the rest is scanning goroutine stacks
- if(i - RootCount >= runtime·allglen)
- runtime·throw("markroot: bad index");
- gp = runtime·allg[i - RootCount];
- // Remember when we first observed the G blocked,
- // needed only for traceback output.
- status = runtime·readgstatus(gp); // We are not in a scan state
- if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
- gp->waitsince = runtime·work.tstart;
- // Shrink the stack if not much of it is being used, but not during the scan phase.
- if (runtime·gcphase != GCscan) // Do not shrink during GCscan phase.
- runtime·shrinkstack(gp);
- if(runtime·readgstatus(gp) == Gdead)
- gp->gcworkdone = true;
- else
- gp->gcworkdone = false;
- restart = runtime·stopg(gp);
-
- // goroutine will scan its own stack when it stops running.
- // Wait until it has.
- while(runtime·readgstatus(gp) == Grunning && !gp->gcworkdone) {
- }
-
- // scanstack(gp) is done as part of gcphasework,
- // but to make sure it has finished we need the
- // stack traps to have all responded, so drop into
- // this while loop until they respond.
- while(!gp->gcworkdone){
- status = runtime·readgstatus(gp);
- if(status == Gdead) {
- gp->gcworkdone = true; // scan not needed for a dead G
- break;
- }
- if(status == Gwaiting || status == Grunnable)
- restart = runtime·stopg(gp);
- }
- if(restart)
- runtime·restartg(gp);
- break;
- }
-}
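-
-// A note on the root indexing used with parforsetup(..., RootCount + allglen, ...)
-// later in this file: indices below RootCount select the fixed root cases handled
-// above (data, bss, finalizers, spans, mcache flush), and index RootCount+k
-// selects the stack of goroutine allg[k], matching the default case above.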
-
-// Get an empty work buffer off the work.empty list,
-// allocating new buffers as needed.
-static Workbuf*
-getempty(Workbuf *b)
-{
- if(b != nil) {
- putfull(b);
- b = nil;
- }
- if(runtime·work.empty)
- b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);
-
- if(b && b->nobj != 0) {
- runtime·printf("m%d: getempty: popped b=%p with non-zero b->nobj=%d\n", g->m->id, b, (uint32)b->nobj);
- runtime·throw("getempty: workbuffer not empty, b->nobj not 0");
- }
- if(b == nil) {
- b = runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
- b->nobj = 0;
- }
- return b;
-}
-
-static void
-putempty(Workbuf *b)
-{
- if(b->nobj != 0) {
- runtime·throw("putempty: b->nobj not 0\n");
- }
- runtime·lfstackpush(&runtime·work.empty, &b->node);
-}
-
-// Put a full or partially full workbuf on the full list.
-static void
-putfull(Workbuf *b)
-{
- if(b->nobj <= 0) {
- runtime·throw("putfull: b->nobj <= 0\n");
- }
- runtime·lfstackpush(&runtime·work.full, &b->node);
-}
-
-// Get a partially filled work buffer from the work.partial list;
-// if none are available, get an empty one.
-static Workbuf*
-getpartialorempty(void)
-{
- Workbuf *b;
-
- b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
- if(b == nil)
- b = getempty(nil);
- return b;
-}
-
-static void
-putpartial(Workbuf *b)
-{
-
- if(b->nobj == 0)
- runtime·lfstackpush(&runtime·work.empty, &b->node);
- else if (b->nobj < nelem(b->obj))
- runtime·lfstackpush(&runtime·work.partial, &b->node);
- else if (b->nobj == nelem(b->obj))
- runtime·lfstackpush(&runtime·work.full, &b->node);
- else {
- runtime·printf("b=%p, b->nobj=%d, nelem(b->obj)=%d\n", b, (uint32)b->nobj, (uint32)nelem(b->obj));
- runtime·throw("putpartial: bad Workbuf b->nobj");
- }
-}
-
-// Get a full work buffer off the work.full or a partially
-// filled one off the work.partial list. If nothing is available
-// wait until all the other gc helpers have finished and then
-// return nil.
-// getfull acts as a barrier for work.nproc helpers. As long as one
-// gchelper is actively marking objects it
-// may create a workbuffer that the other helpers can work on.
-// The for loop either exits when a work buffer is found
-// or when _all_ of the work.nproc GC helpers are in the loop
-// looking for work and thus not capable of creating new work.
-// This is in fact the termination condition for the STW mark
-// phase.
-static Workbuf*
-getfull(Workbuf *b)
-{
- int32 i;
-
- if(b != nil)
- putempty(b);
-
- b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
- if(b==nil)
- b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
- if(b != nil || runtime·work.nproc == 1)
- return b;
-
- runtime·xadd(&runtime·work.nwait, +1);
- for(i=0;; i++) {
- if(runtime·work.full != 0) {
- runtime·xadd(&runtime·work.nwait, -1);
- b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
- if(b==nil)
- b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
- if(b != nil)
- return b;
- runtime·xadd(&runtime·work.nwait, +1);
- }
- if(runtime·work.nwait == runtime·work.nproc)
- return nil;
- if(i < 10) {
- g->m->gcstats.nprocyield++;
- runtime·procyield(20);
- } else if(i < 20) {
- g->m->gcstats.nosyield++;
- runtime·osyield();
- } else {
- g->m->gcstats.nsleep++;
- runtime·usleep(100);
- }
- }
-}
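-
-// A rough sketch of the termination accounting in getfull, for work.nproc == 4:
-//
-//	helper A runs out of work  -> xadd(&nwait, +1)   nwait == 1
-//	helper B runs out of work  -> xadd(&nwait, +1)   nwait == 2
-//	helper A finds a buffer    -> xadd(&nwait, -1)   nwait == 1 (it may create new work again)
-//	...
-//	all four are waiting       ->                    nwait == nproc == 4
-//
-// Only in the last state can no helper produce new work, so returning nil
-// (the work barrier) is safe.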
-
-static Workbuf*
-handoff(Workbuf *b)
-{
- int32 n;
- Workbuf *b1;
-
- // Make new buffer with half of b's pointers.
- b1 = getempty(nil);
- n = b->nobj/2;
- b->nobj -= n;
- b1->nobj = n;
- runtime·memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
- g->m->gcstats.nhandoff++;
- g->m->gcstats.nhandoffcnt += n;
-
- // Put b on full list - let first half of b get stolen.
- runtime·lfstackpush(&runtime·work.full, &b->node);
- return b1;
-}
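-
-// For example (a sketch, with room to spare in the buffers): if b->nobj == 10,
-// then n == 5, b keeps obj[0..4] and is pushed on the full list for others to
-// steal, while the returned b1 holds copies of obj[5..9] for the current
-// worker to keep scanning.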
-
-BitVector
-runtime·stackmapdata(StackMap *stackmap, int32 n)
-{
- if(n < 0 || n >= stackmap->n)
- runtime·throw("stackmapdata: index out of range");
- return (BitVector){stackmap->nbit, stackmap->bytedata + n*((stackmap->nbit+31)/32*4)};
-}
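-
-// A worked example of the entry-size computation above: each entry is nbit bits
-// rounded up to whole 32-bit words, so with stackmap->nbit == 40 an entry
-// occupies (40+31)/32*4 == 8 bytes and entry n starts at bytedata + n*8.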
-
-// Scan a stack frame: local variables and function arguments/results.
-static bool
-scanframe(Stkframe *frame, void *unused)
-{
- Func *f;
- StackMap *stackmap;
- BitVector bv;
- uintptr size, minsize;
- uintptr targetpc;
- int32 pcdata;
-
- USED(unused);
- f = frame->fn;
- targetpc = frame->continpc;
- if(targetpc == 0) {
- // Frame is dead.
- return true;
- }
- if(Debug > 1)
- runtime·printf("scanframe %s\n", runtime·funcname(f));
- if(targetpc != f->entry)
- targetpc--;
- pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
- if(pcdata == -1) {
- // We do not have a valid pcdata value but there might be a
- // stackmap for this function. It is likely that we are looking
- // at the function prologue, assume so and hope for the best.
- pcdata = 0;
- }
-
- // Scan local variables if stack frame has been allocated.
- size = frame->varp - frame->sp;
- if(thechar != '6' && thechar != '8')
- minsize = sizeof(uintptr);
- else
- minsize = 0;
- if(size > minsize) {
- stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
- if(stackmap == nil || stackmap->n <= 0) {
- runtime·printf("runtime: frame %s untyped locals %p+%p\n", runtime·funcname(f), (byte*)(frame->varp-size), size);
- runtime·throw("missing stackmap");
- }
-
- // Locals bitmap information, scan just the pointers in locals.
- if(pcdata < 0 || pcdata >= stackmap->n) {
- // don't know where we are
- runtime·printf("runtime: pcdata is %d and %d locals stack map entries for %s (targetpc=%p)\n",
- pcdata, stackmap->n, runtime·funcname(f), targetpc);
- runtime·throw("scanframe: bad symbol table");
- }
- bv = runtime·stackmapdata(stackmap, pcdata);
- size = (bv.n * PtrSize) / BitsPerPointer;
- scanblock((byte*)(frame->varp - size), bv.n/BitsPerPointer*PtrSize, bv.bytedata);
- }
-
- // Scan arguments.
- if(frame->arglen > 0) {
- if(frame->argmap != nil)
- bv = *frame->argmap;
- else {
- stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
- if(stackmap == nil || stackmap->n <= 0) {
- runtime·printf("runtime: frame %s untyped args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen);
- runtime·throw("missing stackmap");
- }
- if(pcdata < 0 || pcdata >= stackmap->n) {
- // don't know where we are
- runtime·printf("runtime: pcdata is %d and %d args stack map entries for %s (targetpc=%p)\n",
- pcdata, stackmap->n, runtime·funcname(f), targetpc);
- runtime·throw("scanframe: bad symbol table");
- }
- bv = runtime·stackmapdata(stackmap, pcdata);
- }
- scanblock((byte*)frame->argp, bv.n/BitsPerPointer*PtrSize, bv.bytedata);
- }
- return true;
-}
-
-static void
-scanstack(G *gp)
-{
- M *mp;
- bool (*fn)(Stkframe*, void*);
-
- if((runtime·readgstatus(gp)&Gscan) == 0) {
- runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
- runtime·throw("mark - bad status");
- }
-
- switch(runtime·readgstatus(gp)&~Gscan) {
- default:
- runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
- runtime·throw("mark - bad status");
- case Gdead:
- return;
- case Grunning:
- runtime·throw("scanstack: - goroutine not stopped");
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
- break;
- }
-
- if(gp == g)
- runtime·throw("can't scan our own stack");
- if((mp = gp->m) != nil && mp->helpgc)
- runtime·throw("can't scan gchelper stack");
-
- fn = scanframe;
- runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &fn, nil, 0);
- runtime·tracebackdefers(gp, &fn, nil);
-}
-
-// If the slot is grey or black return true, if white return false.
-// If the slot is not in the known heap and thus does not have a valid GC bitmap then
-// it is considered grey. Globals and stacks can hold such slots.
-// The slot is grey if its mark bit is set and it is enqueued to be scanned.
-// The slot is black if it has already been scanned.
-// It is white if it has a valid mark bit and the bit is not set.
-static bool
-shaded(byte *slot)
-{
- Markbits mbits;
- byte *valid;
-
- if(!inheap(slot)) // non-heap slots considered grey
- return true;
-
- valid = objectstart(slot, &mbits);
- if(valid == nil)
- return true;
-
- if(checkmark)
- return ischeckmarked(&mbits);
-
- return (mbits.bits&bitMarked) != 0;
-}
-
-// Shade the object if it isn't already.
-// The object is not nil and known to be in the heap.
-static void
-shade(byte *b)
-{
- byte *obj;
- Workbuf *wbuf;
- Markbits mbits;
-
- if(!inheap(b))
- runtime·throw("shade: passed an address not in the heap");
-
- wbuf = getpartialorempty();
- // Mark the object, return some important bits.
- // If we combine the following two routines we don't have to pass mbits or obj around.
- obj = objectstart(b, &mbits);
- if(obj != nil)
- wbuf = greyobject(obj, &mbits, wbuf); // augments the wbuf
-
- putpartial(wbuf);
- return;
-}
-
-// This is the Dijkstra barrier coarsened to always shade the ptr (dst) object.
-// The original Dijkstra barrier only shaded ptrs being placed in black slots.
-//
-// Shade indicates that it has seen a white pointer by adding the referent
-// to wbuf as well as marking it.
-//
-// slot is the destination (dst) in go code
-// ptr is the value that goes into the slot (src) in the go code
-//
-// Dijkstra pointed out that maintaining the no-black-to-white-pointers
-// invariant means that white-to-white pointers need not
-// be noted by the write barrier. Furthermore if either
-// white object dies before it is reached by the
-// GC then the object can be collected during this GC cycle
-// instead of waiting for the next cycle. Unfortunately the cost of
-// ensuring that the object holding the slot doesn't concurrently
-// change to black without the mutator noticing seems prohibitive.
-//
-// Consider the following example where the mutator writes into
-// a slot and then loads the slot's mark bit while the GC thread
-// writes to the slot's mark bit and then as part of scanning reads
-// the slot.
-//
-// Initially both [slot] and [slotmark] are 0 (nil)
-// Mutator thread GC thread
-// st [slot], ptr st [slotmark], 1
-//
-// ld r1, [slotmark] ld r2, [slot]
-//
-// This is a classic example of independent reads of independent writes,
-// aka IRIW. The question is whether r1==r2==0 is allowed, and for most hardware the
-// answer is yes without inserting memory barriers between the st and the ld.
-// These barriers are expensive, so we have decided that we will
-// always grey the ptr object regardless of the slot's color.
-//
-void
-runtime·gcmarkwb_m()
-{
- byte *ptr;
- ptr = (byte*)g->m->scalararg[1];
-
- switch(runtime·gcphase) {
- default:
- runtime·throw("gcphasework in bad gcphase");
- case GCoff:
- case GCquiesce:
- case GCstw:
- case GCsweep:
- case GCscan:
- break;
- case GCmark:
- if(ptr != nil && inheap(ptr))
- shade(ptr);
- break;
- case GCmarktermination:
- if(ptr != nil && inheap(ptr))
- shade(ptr);
- break;
- }
-}
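-
-// A sketch of the rule implemented above: for a pointer store *slot = ptr
-// performed by the mutator while marking is enabled (GCmark or
-// GCmarktermination), the caller performs the pointer store itself and also
-// invokes this barrier, which does, regardless of the slot's color:
-//
-//	if(ptr != nil && inheap(ptr))
-//		shade(ptr);	// grey the referent
-//
-// The slot itself is never inspected, which is what lets us avoid the memory
-// barriers discussed in the IRIW example above.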
-
-// The gp has been moved to a GC safepoint. GC phase specific
-// work is done here.
-void
-runtime·gcphasework(G *gp)
-{
- switch(runtime·gcphase) {
- default:
- runtime·throw("gcphasework in bad gcphase");
- case GCoff:
- case GCquiesce:
- case GCstw:
- case GCsweep:
- // No work.
- break;
- case GCscan:
- // scan the stack, mark the objects, put pointers in work buffers
- // hanging off the P where this is being run.
- scanstack(gp);
- break;
- case GCmark:
- break;
- case GCmarktermination:
- scanstack(gp);
- // All available mark work will be emptied before returning.
- break;
- }
- gp->gcworkdone = true;
-}
-
-#pragma dataflag NOPTR
-static byte finalizer1[] = {
- // Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
- // Each byte describes 4 words.
- // Need 4 Finalizers described by 5 bytes before pattern repeats:
- // ptr ptr uintptr ptr ptr
- // ptr ptr uintptr ptr ptr
- // ptr ptr uintptr ptr ptr
- // ptr ptr uintptr ptr ptr
- // aka
- // ptr ptr uintptr ptr
- // ptr ptr ptr uintptr
- // ptr ptr ptr ptr
- // uintptr ptr ptr ptr
- // ptr uintptr ptr ptr
- // Assumptions about Finalizer layout checked below.
- BitsPointer | BitsPointer<<2 | BitsScalar<<4 | BitsPointer<<6,
- BitsPointer | BitsPointer<<2 | BitsPointer<<4 | BitsScalar<<6,
- BitsPointer | BitsPointer<<2 | BitsPointer<<4 | BitsPointer<<6,
- BitsScalar | BitsPointer<<2 | BitsPointer<<4 | BitsPointer<<6,
- BitsPointer | BitsScalar<<2 | BitsPointer<<4 | BitsPointer<<6,
-};
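-
-// A quick check of the arithmetic behind finalizer1: 4 Finalizers * 5 words ==
-// 20 words, and at 4 words per byte that is exactly the 5 bytes above, after
-// which the word pattern repeats. finptrmask is filled by tiling these 5 bytes
-// in queuefinalizer below.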
-
-void
-runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot)
-{
- FinBlock *block;
- Finalizer *f;
- int32 i;
-
- runtime·lock(&runtime·finlock);
- if(runtime·finq == nil || runtime·finq->cnt == runtime·finq->cap) {
- if(runtime·finc == nil) {
- runtime·finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
- runtime·finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
- runtime·finc->alllink = runtime·allfin;
- runtime·allfin = runtime·finc;
- if(finptrmask[0] == 0) {
- // Build pointer mask for Finalizer array in block.
- // Check assumptions made in finalizer1 array above.
- if(sizeof(Finalizer) != 5*PtrSize ||
- offsetof(Finalizer, fn) != 0 ||
- offsetof(Finalizer, arg) != PtrSize ||
- offsetof(Finalizer, nret) != 2*PtrSize ||
- offsetof(Finalizer, fint) != 3*PtrSize ||
- offsetof(Finalizer, ot) != 4*PtrSize ||
- BitsPerPointer != 2) {
- runtime·throw("finalizer out of sync");
- }
- for(i=0; i<nelem(finptrmask); i++)
- finptrmask[i] = finalizer1[i%nelem(finalizer1)];
- }
- }
- block = runtime·finc;
- runtime·finc = block->next;
- block->next = runtime·finq;
- runtime·finq = block;
- }
- f = &runtime·finq->fin[runtime·finq->cnt];
- runtime·finq->cnt++;
- f->fn = fn;
- f->nret = nret;
- f->fint = fint;
- f->ot = ot;
- f->arg = p;
- runtime·fingwake = true;
- runtime·unlock(&runtime·finlock);
-}
-
-void
-runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*))
-{
- FinBlock *fb;
- Finalizer *f;
- uintptr i;
-
- for(fb = runtime·allfin; fb; fb = fb->alllink) {
- for(i = 0; i < fb->cnt; i++) {
- f = &fb->fin[i];
- callback(f->fn, f->arg, f->nret, f->fint, f->ot);
- }
- }
-}
-
-// Returns only when span s has been swept.
-void
-runtime·MSpan_EnsureSwept(MSpan *s)
-{
- uint32 sg;
-
- // Caller must disable preemption.
- // Otherwise when this function returns the span can become unswept again
- // (if GC is triggered on another goroutine).
- if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
- runtime·throw("MSpan_EnsureSwept: m is not locked");
-
- sg = runtime·mheap.sweepgen;
- if(runtime·atomicload(&s->sweepgen) == sg)
- return;
- // The caller must be sure that the span is a MSpanInUse span.
- if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
- runtime·MSpan_Sweep(s, false);
- return;
- }
- // Unfortunate condition: we don't have an efficient means to wait, so spin.
- while(runtime·atomicload(&s->sweepgen) != sg)
- runtime·osyield();
-}
-
-// Sweep frees or collects finalizers for blocks not marked in the mark phase.
-// It clears the mark bits in preparation for the next GC round.
-// Returns true if the span was returned to heap.
-// If preserve=true, don't return it to heap nor relink in MCentral lists;
-// caller takes care of it.
-bool
-runtime·MSpan_Sweep(MSpan *s, bool preserve)
-{
- int32 cl, n, npages, nfree;
- uintptr size, off, step;
- uint32 sweepgen;
- byte *p, *bitp, shift, xbits, bits;
- MCache *c;
- byte *arena_start;
- MLink head, *end, *link;
- Special *special, **specialp, *y;
- bool res, sweepgenset;
-
- if(checkmark)
- runtime·throw("MSpan_Sweep: checkmark only runs in STW and after the sweep.");
-
- // It's critical that we enter this function with preemption disabled;
- // GC must not start while we are in the middle of this function.
- if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
- runtime·throw("MSpan_Sweep: m is not locked");
- sweepgen = runtime·mheap.sweepgen;
- if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
- runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
- s->state, s->sweepgen, sweepgen);
- runtime·throw("MSpan_Sweep: bad span state");
- }
- arena_start = runtime·mheap.arena_start;
- cl = s->sizeclass;
- size = s->elemsize;
- if(cl == 0) {
- n = 1;
- } else {
- // Chunk full of small blocks.
- npages = runtime·class_to_allocnpages[cl];
- n = (npages << PageShift) / size;
- }
- res = false;
- nfree = 0;
- end = &head;
- c = g->m->mcache;
- sweepgenset = false;
-
- // Mark any free objects in this span so we don't collect them.
- for(link = s->freelist; link != nil; link = link->next) {
- off = (uintptr*)link - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- *bitp |= bitMarked<<shift;
- }
-
- // Unlink & free special records for any objects we're about to free.
- specialp = &s->specials;
- special = *specialp;
- while(special != nil) {
- // A finalizer can be set for an inner byte of an object, find object beginning.
- p = (byte*)(s->start << PageShift) + special->offset/size*size;
- off = (uintptr*)p - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- bits = (*bitp>>shift) & bitMask;
- if((bits&bitMarked) == 0) {
- // Find the exact byte for which the special was set up
- // (as opposed to object beginning).
- p = (byte*)(s->start << PageShift) + special->offset;
- // about to free object: splice out special record
- y = special;
- special = special->next;
- *specialp = special;
- if(!runtime·freespecial(y, p, size, false)) {
- // stop freeing of object if it has a finalizer
- *bitp |= bitMarked << shift;
- }
- } else {
- // object is still live: keep special record
- specialp = &special->next;
- special = *specialp;
- }
- }
-
- // Sweep through n objects of given size starting at p.
- // This thread owns the span now, so it can manipulate
- // the block bitmap without atomic operations.
- p = (byte*)(s->start << PageShift);
- // Find bits for the beginning of the span.
- off = (uintptr*)p - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = 0;
- step = size/(PtrSize*wordsPerBitmapByte);
- // Rewind to the previous quadruple, since we advance to the next one
- // at the beginning of the loop.
- bitp += step;
- if(step == 0) {
- // 8-byte objects.
- bitp++;
- shift = gcBits;
- }
- for(; n > 0; n--, p += size) {
- bitp -= step;
- if(step == 0) {
- if(shift != 0)
- bitp--;
- shift = gcBits - shift;
- }
-
- xbits = *bitp;
- bits = (xbits>>shift) & bitMask;
-
- // Allocated and marked object, reset bits to allocated.
- if((bits&bitMarked) != 0) {
- *bitp &= ~(bitMarked<<shift);
- continue;
- }
- // At this point we know that we are looking at a garbage object
- // that needs to be collected.
- if(runtime·debug.allocfreetrace)
- runtime·tracefree(p, size);
- // Reset to allocated+noscan.
- *bitp = (xbits & ~((bitMarked|(BitsMask<<2))<<shift)) | ((uintptr)BitsDead<<(shift+2));
- if(cl == 0) {
- // Free large span.
- if(preserve)
- runtime·throw("can't preserve large span");
- runtime·unmarkspan(p, s->npages<<PageShift);
- s->needzero = 1;
- // important to set sweepgen before returning it to heap
- runtime·atomicstore(&s->sweepgen, sweepgen);
- sweepgenset = true;
- // NOTE(rsc,dvyukov): The original implementation of efence
- // in CL 22060046 used SysFree instead of SysFault, so that
- // the operating system would eventually give the memory
- // back to us again, so that an efence program could run
- // longer without running out of memory. Unfortunately,
- // calling SysFree here without any kind of adjustment of the
- // heap data structures means that when the memory does
- // come back to us, we have the wrong metadata for it, either in
- // the MSpan structures or in the garbage collection bitmap.
- // Using SysFault here means that the program will run out of
- // memory fairly quickly in efence mode, but at least it won't
- // have mysterious crashes due to confused memory reuse.
- // It should be possible to switch back to SysFree if we also
- // implement and then call some kind of MHeap_DeleteSpan.
- if(runtime·debug.efence) {
- s->limit = nil; // prevent mlookup from finding this span
- runtime·SysFault(p, size);
- } else
- runtime·MHeap_Free(&runtime·mheap, s, 1);
- c->local_nlargefree++;
- c->local_largefree += size;
- runtime·xadd64(&mstats.next_gc, -(uint64)(size * (runtime·gcpercent + 100)/100));
- res = true;
- } else {
- // Free small object.
- if(size > 2*sizeof(uintptr))
- ((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll; // mark as "needs to be zeroed"
- else if(size > sizeof(uintptr))
- ((uintptr*)p)[1] = 0;
-
- end->next = (MLink*)p;
- end = (MLink*)p;
- nfree++;
- }
- }
-
- // We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
- // because of the potential for a concurrent free/SetFinalizer.
- // But we need to set it before we make the span available for allocation
- // (return it to heap or mcentral), because allocation code assumes that a
- // span is already swept if available for allocation.
-
- if(!sweepgenset && nfree == 0) {
- // The span must be in our exclusive ownership until we update sweepgen,
- // check for potential races.
- if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
- runtime·printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
- s->state, s->sweepgen, sweepgen);
- runtime·throw("MSpan_Sweep: bad span state after sweep");
- }
- runtime·atomicstore(&s->sweepgen, sweepgen);
- }
- if(nfree > 0) {
- c->local_nsmallfree[cl] += nfree;
- c->local_cachealloc -= nfree * size;
- runtime·xadd64(&mstats.next_gc, -(uint64)(nfree * size * (runtime·gcpercent + 100)/100));
- res = runtime·MCentral_FreeSpan(&runtime·mheap.central[cl].mcentral, s, nfree, head.next, end, preserve);
- // MCentral_FreeSpan updates sweepgen
- }
- return res;
-}
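-
-// A note on the next_gc adjustments above: freeing memory lowers the GC trigger
-// by freed*(gcpercent+100)/100. For example (a sketch, with runtime·gcpercent == 100),
-// sweeping a dead 4096-byte large object lowers mstats.next_gc by 4096*200/100 == 8192,
-// mirroring how gc() below sets next_gc from the live heap.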
-
-// State of background runtime·sweep.
-// Protected by runtime·gclock.
-typedef struct SweepData SweepData;
-struct SweepData
-{
- G* g;
- bool parked;
-
- uint32 spanidx; // background sweeper position
-
- uint32 nbgsweep;
- uint32 npausesweep;
-};
-SweepData runtime·sweep;
-
-// sweeps one span
-// returns number of pages returned to heap, or -1 if there is nothing to sweep
-uintptr
-runtime·sweepone(void)
-{
- MSpan *s;
- uint32 idx, sg;
- uintptr npages;
-
- // increment locks to ensure that the goroutine is not preempted
- // in the middle of sweep thus leaving the span in an inconsistent state for next GC
- g->m->locks++;
- sg = runtime·mheap.sweepgen;
- for(;;) {
- idx = runtime·xadd(&runtime·sweep.spanidx, 1) - 1;
- if(idx >= runtime·work.nspan) {
- runtime·mheap.sweepdone = true;
- g->m->locks--;
- return -1;
- }
- s = runtime·work.spans[idx];
- if(s->state != MSpanInUse) {
- s->sweepgen = sg;
- continue;
- }
- if(s->sweepgen != sg-2 || !runtime·cas(&s->sweepgen, sg-2, sg-1))
- continue;
- npages = s->npages;
- if(!runtime·MSpan_Sweep(s, false))
- npages = 0;
- g->m->locks--;
- return npages;
- }
-}
-
-static void
-sweepone_m(void)
-{
- g->m->scalararg[0] = runtime·sweepone();
-}
-
-#pragma textflag NOSPLIT
-uintptr
-runtime·gosweepone(void)
-{
- void (*fn)(void);
-
- fn = sweepone_m;
- runtime·onM(&fn);
- return g->m->scalararg[0];
-}
-
-#pragma textflag NOSPLIT
-bool
-runtime·gosweepdone(void)
-{
- return runtime·mheap.sweepdone;
-}
-
-
-void
-runtime·gchelper(void)
-{
- uint32 nproc;
-
- g->m->traceback = 2;
- gchelperstart();
-
- // parallel mark over GC roots
- runtime·parfordo(runtime·work.markfor);
- if(runtime·gcphase != GCscan)
- scanblock(nil, 0, nil); // blocks in getfull
- nproc = runtime·work.nproc; // work.nproc can change right after we increment work.ndone
- if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
- runtime·notewakeup(&runtime·work.alldone);
- g->m->traceback = 0;
-}
-
-static void
-cachestats(void)
-{
- MCache *c;
- P *p, **pp;
-
- for(pp=runtime·allp; p=*pp; pp++) {
- c = p->mcache;
- if(c==nil)
- continue;
- runtime·purgecachedstats(c);
- }
-}
-
-static void
-flushallmcaches(void)
-{
- P *p, **pp;
- MCache *c;
-
- // Flush MCache's to MCentral.
- for(pp=runtime·allp; p=*pp; pp++) {
- c = p->mcache;
- if(c==nil)
- continue;
- runtime·MCache_ReleaseAll(c);
- runtime·stackcache_clear(c);
- }
-}
-
-static void
-flushallmcaches_m(G *gp)
-{
- flushallmcaches();
- runtime·gogo(&gp->sched);
-}
-
-void
-runtime·updatememstats(GCStats *stats)
-{
- M *mp;
- MSpan *s;
- int32 i;
- uint64 smallfree;
- uint64 *src, *dst;
- void (*fn)(G*);
-
- if(stats)
- runtime·memclr((byte*)stats, sizeof(*stats));
- for(mp=runtime·allm; mp; mp=mp->alllink) {
- if(stats) {
- src = (uint64*)&mp->gcstats;
- dst = (uint64*)stats;
- for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
- dst[i] += src[i];
- runtime·memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
- }
- }
- mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
- mstats.mspan_inuse = runtime·mheap.spanalloc.inuse;
- mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
- mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
-
- // Calculate memory allocator stats.
- // During program execution we only count number of frees and amount of freed memory.
- // Current number of alive objects in the heap and amount of alive heap memory
- // are calculated by scanning all spans.
- // Total number of mallocs is calculated as number of frees plus number of alive objects.
- // Similarly, total amount of allocated memory is calculated as amount of freed memory
- // plus amount of alive heap memory.
- mstats.alloc = 0;
- mstats.total_alloc = 0;
- mstats.nmalloc = 0;
- mstats.nfree = 0;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.by_size[i].nmalloc = 0;
- mstats.by_size[i].nfree = 0;
- }
-
- // Flush MCache's to MCentral.
- if(g == g->m->g0)
- flushallmcaches();
- else {
- fn = flushallmcaches_m;
- runtime·mcall(&fn);
- }
-
- // Aggregate local stats.
- cachestats();
-
- // Scan all spans and count number of alive objects.
- runtime·lock(&runtime·mheap.lock);
- for(i = 0; i < runtime·mheap.nspan; i++) {
- s = runtime·mheap.allspans[i];
- if(s->state != MSpanInUse)
- continue;
- if(s->sizeclass == 0) {
- mstats.nmalloc++;
- mstats.alloc += s->elemsize;
- } else {
- mstats.nmalloc += s->ref;
- mstats.by_size[s->sizeclass].nmalloc += s->ref;
- mstats.alloc += s->ref*s->elemsize;
- }
- }
- runtime·unlock(&runtime·mheap.lock);
-
- // Aggregate by size class.
- smallfree = 0;
- mstats.nfree = runtime·mheap.nlargefree;
- for(i = 0; i < nelem(mstats.by_size); i++) {
- mstats.nfree += runtime·mheap.nsmallfree[i];
- mstats.by_size[i].nfree = runtime·mheap.nsmallfree[i];
- mstats.by_size[i].nmalloc += runtime·mheap.nsmallfree[i];
- smallfree += runtime·mheap.nsmallfree[i] * runtime·class_to_size[i];
- }
- mstats.nfree += mstats.tinyallocs;
- mstats.nmalloc += mstats.nfree;
-
- // Calculate derived stats.
- mstats.total_alloc = mstats.alloc + runtime·mheap.largefree + smallfree;
- mstats.heap_alloc = mstats.alloc;
- mstats.heap_objects = mstats.nmalloc - mstats.nfree;
-}
-
-// Structure of arguments passed to function gc().
-// This allows the arguments to be passed via runtime·mcall.
-struct gc_args
-{
- int64 start_time; // start time of GC in ns (just before stoptheworld)
- bool eagersweep;
-};
-
-static void gc(struct gc_args *args);
-
-int32
-runtime·readgogc(void)
-{
- byte *p;
-
- p = runtime·getenv("GOGC");
- if(p == nil || p[0] == '\0')
- return 100;
- if(runtime·strcmp(p, (byte*)"off") == 0)
- return -1;
- return runtime·atoi(p);
-}
-
-void
-runtime·gcinit(void)
-{
- if(sizeof(Workbuf) != WorkbufSize)
- runtime·throw("runtime: size of Workbuf is suboptimal");
-
- runtime·work.markfor = runtime·parforalloc(MaxGcproc);
- runtime·gcpercent = runtime·readgogc();
- runtime·gcdatamask = unrollglobgcprog(runtime·gcdata, runtime·edata - runtime·data);
- runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
-}
-
-// Called from malloc.go using onM; stopping and starting the world is handled by the caller.
-void
-runtime·gc_m(void)
-{
- struct gc_args a;
- G *gp;
-
- gp = g->m->curg;
- runtime·casgstatus(gp, Grunning, Gwaiting);
- gp->waitreason = runtime·gostringnocopy((byte*)"garbage collection");
-
- a.start_time = (uint64)(g->m->scalararg[0]) | ((uint64)(g->m->scalararg[1]) << 32);
- a.eagersweep = g->m->scalararg[2];
- gc(&a);
- runtime·casgstatus(gp, Gwaiting, Grunning);
-}
-
-// Similar to clearcheckmarkbits but works on a single span.
-// It performs two tasks.
-// 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
-// for nibbles with the BoundaryBit set.
-// 2. When used after the checkmark phase it converts BitsPointerMarked (11) to BitsPointer (10) and
-// BitsScalarMarked (00) to BitsScalar (01), thus clearing the checkmark encoding.
-// For the second case it is possible to restore the BitsDead pattern but since
-// clearmark is a debug tool performance has a lower priority than simplicity.
-// The span is MSpanInUse and the world is stopped.
-static void
-clearcheckmarkbitsspan(MSpan *s)
-{
- int32 cl, n, npages, i;
- uintptr size, off, step;
- byte *p, *bitp, *arena_start, b;
-
- if(s->state != MSpanInUse) {
- runtime·printf("runtime:clearcheckmarkbitsspan: state=%d\n",
- s->state);
- runtime·throw("clearcheckmarkbitsspan: bad span state");
- }
- arena_start = runtime·mheap.arena_start;
- cl = s->sizeclass;
- size = s->elemsize;
- if(cl == 0) {
- n = 1;
- } else {
- // Chunk full of small blocks.
- npages = runtime·class_to_allocnpages[cl];
- n = (npages << PageShift) / size;
- }
-
- // MSpan_Sweep has similar code but instead of overloading and
- // complicating that routine we do a simpler walk here.
- // Sweep through n objects of given size starting at p.
- // This thread owns the span now, so it can manipulate
- // the block bitmap without atomic operations.
- p = (byte*)(s->start << PageShift);
- // Find bits for the beginning of the span.
- off = (uintptr*)p - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- step = size/(PtrSize*wordsPerBitmapByte);
-
- // The type bit values are:
- // 00 - BitsDead, for us BitsScalarMarked
- // 01 - BitsScalar
- // 10 - BitsPointer
- // 11 - unused, for us BitsPointerMarked
- //
- // When called to prepare for the checkmark phase (checkmark==1),
- // we change BitsDead to BitsScalar, so that there are no BitsScalarMarked
- // type bits anywhere.
- //
- // The checkmark phase marks by changing BitsScalar to BitsScalarMarked
- // and BitsPointer to BitsPointerMarked.
- //
- // When called to clean up after the checkmark phase (checkmark==0),
- // we unmark by changing BitsScalarMarked back to BitsScalar and
- // BitsPointerMarked back to BitsPointer.
- //
- // There are two problems with the scheme as just described.
- // First, the setup rewrites BitsDead to BitsScalar, but the type bits
- // following a BitsDead are uninitialized and must not be used.
- // Second, objects that are free are expected to have their type
- // bits zeroed (BitsDead), so in the cleanup we need to restore
- // any BitsDeads that were there originally.
- //
- // In a one-word object (8-byte allocation on 64-bit system),
- // there is no difference between BitsScalar and BitsDead, because
- // neither is a pointer and there are no more words in the object,
- // so using BitsScalar during the checkmark is safe and mapping
- // both back to BitsDead during cleanup is also safe.
- //
- // In a larger object, we need to be more careful. During setup,
- // if the type of the first word is BitsDead, we change it to BitsScalar
- // (as we must) but also initialize the type of the second
- // word to BitsDead, so that a scan during the checkmark phase
- // will still stop before seeing the uninitialized type bits in the
- // rest of the object. The sequence 'BitsScalar BitsDead' never
- // happens in real type bitmaps - BitsDead is always as early
- // as possible, so immediately after the last BitsPointer.
- // During cleanup, if we see a BitsScalar, we can check to see if it
- // is followed by BitsDead. If so, it was originally BitsDead and
- // we can change it back.
-
- if(step == 0) {
- // updating top and bottom nibbles, all boundaries
- for(i=0; i<n/2; i++, bitp--) {
- if((*bitp & bitBoundary) != bitBoundary)
- runtime·throw("missing bitBoundary");
- b = (*bitp & bitPtrMask)>>2;
- if(!checkmark && (b == BitsScalar || b == BitsScalarMarked))
- *bitp &= ~0x0c; // convert to BitsDead
- else if(b == BitsScalarMarked || b == BitsPointerMarked)
- *bitp ^= BitsCheckMarkXor<<2;
-
- if(((*bitp>>gcBits) & bitBoundary) != bitBoundary)
- runtime·throw("missing bitBoundary");
- b = ((*bitp>>gcBits) & bitPtrMask)>>2;
- if(!checkmark && (b == BitsScalar || b == BitsScalarMarked))
- *bitp &= ~0xc0; // convert to BitsDead
- else if(b == BitsScalarMarked || b == BitsPointerMarked)
- *bitp ^= BitsCheckMarkXor<<(2+gcBits);
- }
- } else {
- // updating bottom nibble for first word of each object
- for(i=0; i<n; i++, bitp -= step) {
- if((*bitp & bitBoundary) != bitBoundary)
- runtime·throw("missing bitBoundary");
- b = (*bitp & bitPtrMask)>>2;
-
- if(checkmark && b == BitsDead) {
- // move BitsDead into second word.
- // set bits to BitsScalar in preparation for checkmark phase.
- *bitp &= ~0xc0;
- *bitp |= BitsScalar<<2;
- } else if(!checkmark && (b == BitsScalar || b == BitsScalarMarked) && (*bitp & 0xc0) == 0) {
- // Cleaning up after checkmark phase.
- // First word is scalar or dead (we forgot)
- // and second word is dead.
- // First word might as well be dead too.
- *bitp &= ~0x0c;
- } else if(b == BitsScalarMarked || b == BitsPointerMarked)
- *bitp ^= BitsCheckMarkXor<<2;
- }
- }
-}
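-
-// Note that the checkmark encoding above is an involution: XORing the 2-bit
-// type field with BitsCheckMarkXor (binary 01) maps BitsScalar(01) <-> BitsScalarMarked(00)
-// and BitsPointer(10) <-> BitsPointerMarked(11), which is why the cleanup above
-// is the single "*bitp ^= BitsCheckMarkXor<<2".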
-
-// clearcheckmarkbits performs two tasks.
-// 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
-// for nibbles with the BoundaryBit set.
-// 2. When used after the checkmark phase it converts BitsPointerMarked (11) to BitsPointer (10) and
-// BitsScalarMarked (00) to BitsScalar (01), thus clearing the checkmark encoding.
-// This is a bit expensive but preserves the BitsDead encoding during the normal marking.
-// BitsDead remains valid for every nibble except the ones with BitsBoundary set.
-static void
-clearcheckmarkbits(void)
-{
- uint32 idx;
- MSpan *s;
- for(idx=0; idx<runtime·work.nspan; idx++) {
- s = runtime·work.spans[idx];
- if(s->state == MSpanInUse) {
- clearcheckmarkbitsspan(s);
- }
- }
-}
-
-// Called from malloc.go using onM.
-// The world is stopped. Rerun the scan and mark phases
-// using the bitMarkedCheck bit instead of the
-// bitMarked bit. If the marking encounters a
-// bitMarked bit that is not set then we throw.
-void
-runtime·gccheckmark_m(void)
-{
- if(!gccheckmarkenable)
- return;
-
- if(checkmark)
- runtime·throw("gccheckmark_m, entered with checkmark already true.");
-
- checkmark = true;
- clearcheckmarkbits(); // Converts BitsDead to BitsScalar.
- runtime·gc_m(); // turns off checkmark
- // Work done; fix up the GC bitmap to remove the checkmark bits.
- clearcheckmarkbits();
-}
-
-// checkmarkenable is initially false
-void
-runtime·gccheckmarkenable_m(void)
-{
- gccheckmarkenable = true;
-}
-
-void
-runtime·gccheckmarkdisable_m(void)
-{
- gccheckmarkenable = false;
-}
-
-void
-runtime·finishsweep_m(void)
-{
- uint32 i, sg;
- MSpan *s;
-
- // The world is stopped so we should be able to complete the sweeps
- // quickly.
- while(runtime·sweepone() != -1)
- runtime·sweep.npausesweep++;
-
- // There may be some other spans being swept concurrently that
- // we need to wait for. If finishsweep_m runs with the world stopped,
- // this code is not required.
- sg = runtime·mheap.sweepgen;
- for(i=0; i<runtime·work.nspan; i++) {
- s = runtime·work.spans[i];
- if(s->sweepgen == sg) {
- continue;
- }
- if(s->state != MSpanInUse) // Span is not part of the GCed heap so no need to ensure it is swept.
- continue;
- runtime·MSpan_EnsureSwept(s);
- }
-}
-
-// Scan all of the stacks, greying (or graying if in America) the referents
-// but not blackening them since the mark write barrier isn't installed.
-void
-runtime·gcscan_m(void)
-{
- uint32 i, allglen, oldphase;
- G *gp, *mastergp, **allg;
-
- // Grab the g that called us and potentially allow rescheduling.
- // This allows it to be scanned like other goroutines.
- mastergp = g->m->curg;
-
- runtime·casgstatus(mastergp, Grunning, Gwaiting);
- mastergp->waitreason = runtime·gostringnocopy((byte*)"garbage collection scan");
-
- // Span sweeping has been done by finishsweep_m.
- // Long term we will want to make this goroutine runnable
- // by placing it onto a scanenqueue state and then calling
- // runtime·restartg(mastergp) to make it Grunnable.
- // At the bottom we will want to return this p back to the scheduler.
-
- oldphase = runtime·gcphase;
-
- runtime·lock(&runtime·allglock);
- allglen = runtime·allglen;
- allg = runtime·allg;
- // Prepare flag indicating that the scan has not been completed.
- for(i = 0; i < allglen; i++) {
- gp = allg[i];
- gp->gcworkdone = false; // set to true in gcphasework
- }
- runtime·unlock(&runtime·allglock);
-
- runtime·work.nwait = 0;
- runtime·work.ndone = 0;
- runtime·work.nproc = 1; // For now do not do this in parallel.
- runtime·gcphase = GCscan;
- // ackgcphase is not needed since we are not scanning running goroutines.
- runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + allglen, nil, false, markroot);
- runtime·parfordo(runtime·work.markfor);
-
- runtime·lock(&runtime·allglock);
-
- allg = runtime·allg;
- // Check that gc work is done.
- for(i = 0; i < allglen; i++) {
- gp = allg[i];
- if(!gp->gcworkdone) {
- runtime·throw("scan missed a g");
- }
- }
- runtime·unlock(&runtime·allglock);
-
- runtime·gcphase = oldphase;
- runtime·casgstatus(mastergp, Gwaiting, Grunning);
- // Let the g that called us continue to run.
-}
-
-// Mark all objects that are known about.
-void
-runtime·gcmark_m(void)
-{
- scanblock(nil, 0, nil);
-}
-
-// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
-// all goroutines see the new barrier.
-void
-runtime·gcinstallmarkwb_m(void)
-{
- runtime·gcphase = GCmark;
-}
-
-// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
-// all goroutines see the new barrier.
-void
-runtime·gcinstalloffwb_m(void)
-{
- runtime·gcphase = GCoff;
-}
-
-static void
-gc(struct gc_args *args)
-{
- int64 t0, t1, t2, t3, t4;
- uint64 heap0, heap1, obj;
- GCStats stats;
- uint32 oldphase;
- uint32 i;
- G *gp;
-
- if(runtime·debug.allocfreetrace)
- runtime·tracegc();
-
- g->m->traceback = 2;
- t0 = args->start_time;
- runtime·work.tstart = args->start_time;
-
- t1 = 0;
- if(runtime·debug.gctrace)
- t1 = runtime·nanotime();
-
- if(!checkmark)
- runtime·finishsweep_m(); // skip during checkmark debug phase.
-
- // Cache runtime·mheap.allspans in work.spans to avoid conflicts with
- // resizing/freeing allspans.
- // New spans can be created while GC progresses, but they are not garbage for
- // this round:
- // - new stack spans can be created even while the world is stopped.
- // - new malloc spans can be created during the concurrent sweep
-
- // Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
- runtime·lock(&runtime·mheap.lock);
- // Free the old cached sweep array if necessary.
- if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
- runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
- // Cache the current array for marking.
- runtime·mheap.gcspans = runtime·mheap.allspans;
- runtime·work.spans = runtime·mheap.allspans;
- runtime·work.nspan = runtime·mheap.nspan;
- runtime·unlock(&runtime·mheap.lock);
- oldphase = runtime·gcphase;
-
- runtime·work.nwait = 0;
- runtime·work.ndone = 0;
- runtime·work.nproc = runtime·gcprocs();
- runtime·gcphase = GCmarktermination;
-
- // World is stopped so allglen will not change.
- for(i = 0; i < runtime·allglen; i++) {
- gp = runtime·allg[i];
- gp->gcworkdone = false; // set to true in gcphasework
- }
-
- runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
- if(runtime·work.nproc > 1) {
- runtime·noteclear(&runtime·work.alldone);
- runtime·helpgc(runtime·work.nproc);
- }
-
- t2 = 0;
- if(runtime·debug.gctrace)
- t2 = runtime·nanotime();
-
- gchelperstart();
- runtime·parfordo(runtime·work.markfor);
-
- scanblock(nil, 0, nil);
-
- if(runtime·work.full)
- runtime·throw("runtime·work.full != nil");
- if(runtime·work.partial)
- runtime·throw("runtime·work.partial != nil");
-
- runtime·gcphase = oldphase;
- t3 = 0;
- if(runtime·debug.gctrace)
- t3 = runtime·nanotime();
-
- if(runtime·work.nproc > 1)
- runtime·notesleep(&runtime·work.alldone);
-
- runtime·shrinkfinish();
-
- cachestats();
- // next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
- // estimate what was live heap size after previous GC (for tracing only)
- heap0 = mstats.next_gc*100/(runtime·gcpercent+100);
- // conservatively set next_gc to high value assuming that everything is live
- // concurrent/lazy sweep will reduce this number while discovering new garbage
- mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*runtime·gcpercent/100;
-
- t4 = runtime·nanotime();
- runtime·atomicstore64(&mstats.last_gc, runtime·unixnanotime()); // must be Unix time to make sense to user
- mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
- mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = t4;
- mstats.pause_total_ns += t4 - t0;
- mstats.numgc++;
- if(mstats.debuggc)
- runtime·printf("pause %D\n", t4-t0);
-
- if(runtime·debug.gctrace) {
- heap1 = mstats.heap_alloc;
- runtime·updatememstats(&stats);
- if(heap1 != mstats.heap_alloc) {
- runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
- runtime·throw("mstats skew");
- }
- obj = mstats.nmalloc - mstats.nfree;
-
- stats.nprocyield += runtime·work.markfor->nprocyield;
- stats.nosyield += runtime·work.markfor->nosyield;
- stats.nsleep += runtime·work.markfor->nsleep;
-
- runtime·printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
- " %d goroutines,"
- " %d/%d/%d sweeps,"
- " %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, runtime·work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
- heap0>>20, heap1>>20, obj,
- mstats.nmalloc, mstats.nfree,
- runtime·gcount(),
- runtime·work.nspan, runtime·sweep.nbgsweep, runtime·sweep.npausesweep,
- stats.nhandoff, stats.nhandoffcnt,
- runtime·work.markfor->nsteal, runtime·work.markfor->nstealcnt,
- stats.nprocyield, stats.nosyield, stats.nsleep);
- runtime·sweep.nbgsweep = runtime·sweep.npausesweep = 0;
- }
-
- // See the comment in the beginning of this function as to why we need the following.
- // Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
- runtime·lock(&runtime·mheap.lock);
- // Free the old cached mark array if necessary.
- if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
- runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
-
- if(gccheckmarkenable) {
- if(!checkmark) {
- // first half of two-pass; don't set up sweep
- runtime·unlock(&runtime·mheap.lock);
- return;
- }
- checkmark = false; // done checking marks
- }
-
- // Cache the current array for sweeping.
- runtime·mheap.gcspans = runtime·mheap.allspans;
- runtime·mheap.sweepgen += 2;
- runtime·mheap.sweepdone = false;
- runtime·work.spans = runtime·mheap.allspans;
- runtime·work.nspan = runtime·mheap.nspan;
- runtime·sweep.spanidx = 0;
- runtime·unlock(&runtime·mheap.lock);
-
-
- if(ConcurrentSweep && !args->eagersweep) {
- runtime·lock(&runtime·gclock);
- if(runtime·sweep.g == nil)
- runtime·sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, gc);
- else if(runtime·sweep.parked) {
- runtime·sweep.parked = false;
- runtime·ready(runtime·sweep.g);
- }
- runtime·unlock(&runtime·gclock);
- } else {
- // Sweep all spans eagerly.
- while(runtime·sweepone() != -1)
- runtime·sweep.npausesweep++;
- // Do an additional mProf_GC, because all 'free' events are now real as well.
- runtime·mProf_GC();
- }
-
- runtime·mProf_GC();
- g->m->traceback = 0;
-}
-
-extern uintptr runtime·sizeof_C_MStats;
-
-static void readmemstats_m(void);
-
-void
-runtime·readmemstats_m(void)
-{
- MStats *stats;
-
- stats = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
-
- runtime·updatememstats(nil);
- // Size of the trailing by_size array differs between Go and C,
- // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
- runtime·memmove(stats, &mstats, runtime·sizeof_C_MStats);
-
- // Stack numbers are part of the heap numbers, separate those out for user consumption
- stats->stacks_sys = stats->stacks_inuse;
- stats->heap_inuse -= stats->stacks_inuse;
- stats->heap_sys -= stats->stacks_inuse;
-}
-
-static void readgcstats_m(void);
-
-#pragma textflag NOSPLIT
-void
-runtime∕debug·readGCStats(Slice *pauses)
-{
- void (*fn)(void);
-
- g->m->ptrarg[0] = pauses;
- fn = readgcstats_m;
- runtime·onM(&fn);
-}
-
-static void
-readgcstats_m(void)
-{
- Slice *pauses;
- uint64 *p;
- uint32 i, j, n;
-
- pauses = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
-
- // Calling code in runtime/debug should make the slice large enough.
- if(pauses->cap < nelem(mstats.pause_ns)+3)
- runtime·throw("runtime: short slice passed to readGCStats");
-
- // Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
- p = (uint64*)pauses->array;
- runtime·lock(&runtime·mheap.lock);
-
- n = mstats.numgc;
- if(n > nelem(mstats.pause_ns))
- n = nelem(mstats.pause_ns);
-
- // The pause buffer is circular. The most recent pause is at
- // pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
- // from there to go back farther in time. We deliver the times
- // most recent first (in p[0]).
- for(i=0; i<n; i++) {
- j = (mstats.numgc-1-i)%nelem(mstats.pause_ns);
- p[i] = mstats.pause_ns[j];
- p[n+i] = mstats.pause_end[j];
- }
-
- p[n+n] = mstats.last_gc;
- p[n+n+1] = mstats.numgc;
- p[n+n+2] = mstats.pause_total_ns;
- runtime·unlock(&runtime·mheap.lock);
- pauses->len = n+n+3;
-}
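-
-// For example (a sketch, with nelem(pause_ns) == 256): if mstats.numgc == 260,
-// n is clamped to 256 and the most recent pause is pause_ns[(260-1)%256] ==
-// pause_ns[3], so p[0] = pause_ns[3], p[1] = pause_ns[2], and so on, wrapping
-// around the circular buffer; the matching end times land in p[n..2n-1].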
-
-void
-runtime·setgcpercent_m(void)
-{
- int32 in;
- int32 out;
-
- in = (int32)(intptr)g->m->scalararg[0];
-
- runtime·lock(&runtime·mheap.lock);
- out = runtime·gcpercent;
- if(in < 0)
- in = -1;
- runtime·gcpercent = in;
- runtime·unlock(&runtime·mheap.lock);
-
- g->m->scalararg[0] = (uintptr)(intptr)out;
-}
-
-static void
-gchelperstart(void)
-{
- if(g->m->helpgc < 0 || g->m->helpgc >= MaxGcproc)
- runtime·throw("gchelperstart: bad m->helpgc");
- if(g != g->m->g0)
- runtime·throw("gchelper not running on g0 stack");
-}
-
-G*
-runtime·wakefing(void)
-{
- G *res;
-
- res = nil;
- runtime·lock(&runtime·finlock);
- if(runtime·fingwait && runtime·fingwake) {
- runtime·fingwait = false;
- runtime·fingwake = false;
- res = runtime·fing;
- }
- runtime·unlock(&runtime·finlock);
- return res;
-}
-
-// Recursively unrolls GC program in prog.
-// mask is where to store the result.
-// ppos is a pointer to position in mask, in bits.
-// sparse says to generate a 4-bit-per-word mask for the heap (a 2-bit-per-word mask for data/bss otherwise).
-static byte*
-unrollgcprog1(byte *mask, byte *prog, uintptr *ppos, bool inplace, bool sparse)
-{
- uintptr pos, siz, i, off;
- byte *arena_start, *prog1, v, *bitp, shift;
-
- arena_start = runtime·mheap.arena_start;
- pos = *ppos;
- for(;;) {
- switch(prog[0]) {
- case insData:
- prog++;
- siz = prog[0];
- prog++;
- for(i = 0; i < siz; i++) {
- v = prog[i/PointersPerByte];
- v >>= (i%PointersPerByte)*BitsPerPointer;
- v &= BitsMask;
- if(inplace) {
- // Store directly into GC bitmap.
- off = (uintptr*)(mask+pos) - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- if(shift==0)
- *bitp = 0;
- *bitp |= v<<(shift+2);
- pos += PtrSize;
- } else if(sparse) {
- // 4-bits per word
- v <<= (pos%8)+2;
- mask[pos/8] |= v;
- pos += gcBits;
- } else {
- // 2-bits per word
- v <<= pos%8;
- mask[pos/8] |= v;
- pos += BitsPerPointer;
- }
- }
- prog += ROUND(siz*BitsPerPointer, 8)/8;
- break;
- case insArray:
- prog++;
- siz = 0;
- for(i = 0; i < PtrSize; i++)
- siz = (siz<<8) + prog[PtrSize-i-1];
- prog += PtrSize;
- prog1 = nil;
- for(i = 0; i < siz; i++)
- prog1 = unrollgcprog1(mask, prog, &pos, inplace, sparse);
- if(prog1[0] != insArrayEnd)
- runtime·throw("unrollgcprog: array does not end with insArrayEnd");
- prog = prog1+1;
- break;
- case insArrayEnd:
- case insEnd:
- *ppos = pos;
- return prog;
- default:
- runtime·throw("unrollgcprog: unknown instruction");
- }
- }
-}
-
-// Unrolls GC program prog for data/bss, returns dense GC mask.
-static BitVector
-unrollglobgcprog(byte *prog, uintptr size)
-{
- byte *mask;
- uintptr pos, masksize;
-
- masksize = ROUND(ROUND(size, PtrSize)/PtrSize*BitsPerPointer, 8)/8;
- mask = runtime·persistentalloc(masksize+1, 0, &mstats.gc_sys);
- mask[masksize] = 0xa1;
- pos = 0;
- prog = unrollgcprog1(mask, prog, &pos, false, false);
- if(pos != size/PtrSize*BitsPerPointer) {
- runtime·printf("unrollglobgcprog: bad program size, got %D, expect %D\n",
- (uint64)pos, (uint64)size/PtrSize*BitsPerPointer);
- runtime·throw("unrollglobgcprog: bad program size");
- }
- if(prog[0] != insEnd)
- runtime·throw("unrollglobgcprog: program does not end with insEnd");
- if(mask[masksize] != 0xa1)
- runtime·throw("unrollglobgcprog: overflow");
- return (BitVector){masksize*8, mask};
-}
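-
-// A worked example of the masksize computation above (assuming PtrSize == 8 and
-// BitsPerPointer == 2): for a 100-byte section, ROUND(100, 8)/8 == 13 words,
-// 13*2 == 26 bits, and ROUND(26, 8)/8 == 4 bytes of mask, plus the extra 0xa1
-// guard byte used to detect overflow.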
-
-void
-runtime·unrollgcproginplace_m(void)
-{
- uintptr size, size0, pos, off;
- byte *arena_start, *prog, *bitp, shift;
- Type *typ;
- void *v;
-
- v = g->m->ptrarg[0];
- typ = g->m->ptrarg[1];
- size = g->m->scalararg[0];
- size0 = g->m->scalararg[1];
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
-
- pos = 0;
- prog = (byte*)typ->gc[1];
- while(pos != size0)
- unrollgcprog1(v, prog, &pos, true, true);
- // Mark first word as bitBoundary.
- arena_start = runtime·mheap.arena_start;
- off = (uintptr*)v - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- *bitp |= bitBoundary<<shift;
- // Mark word after last as BitsDead.
- if(size0 < size) {
- off = (uintptr*)((byte*)v + size0) - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- *bitp &= ~(bitPtrMask<<shift) | ((uintptr)BitsDead<<(shift+2));
- }
-}
-
-// Unrolls GC program in typ->gc[1] into typ->gc[0]
-void
-runtime·unrollgcprog_m(void)
-{
- static Mutex lock;
- Type *typ;
- byte *mask, *prog;
- uintptr pos;
- uintptr x;
-
- typ = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
-
- runtime·lock(&lock);
- mask = (byte*)typ->gc[0];
- if(mask[0] == 0) {
- pos = 8; // skip the unroll flag
- prog = (byte*)typ->gc[1];
- prog = unrollgcprog1(mask, prog, &pos, false, true);
- if(prog[0] != insEnd)
- runtime·throw("unrollgcprog: program does not end with insEnd");
- if(((typ->size/PtrSize)%2) != 0) {
- // repeat the program twice
- prog = (byte*)typ->gc[1];
- unrollgcprog1(mask, prog, &pos, false, true);
- }
-
- // atomic way to say mask[0] = 1
- x = *(uintptr*)mask;
- ((byte*)&x)[0] = 1;
- runtime·atomicstorep((void**)mask, (void*)x);
- }
- runtime·unlock(&lock);
-}
-
-// mark the span of memory at v as having n blocks of the given size.
-// if leftover is true, there is left over space at the end of the span.
-void
-runtime·markspan(void *v, uintptr size, uintptr n, bool leftover)
-{
- uintptr i, off, step;
- byte *b;
-
- if((byte*)v+size*n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
- runtime·throw("markspan: bad pointer");
-
- // Find bits of the beginning of the span.
- off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
- b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
- if((off%wordsPerBitmapByte) != 0)
- runtime·throw("markspan: unaligned length");
-
- // Okay to use non-atomic ops here, because we control
- // the entire span, and each bitmap byte has bits for only
- // one span, so no other goroutines are changing these bitmap words.
-
- if(size == PtrSize) {
- // Possible only on 64-bits (minimal size class is 8 bytes).
- // Poor man's memset(0x11).
- if(0x11 != ((bitBoundary+BitsDead)<<gcBits) + (bitBoundary+BitsDead))
- runtime·throw("markspan: bad bits");
- if((n%(wordsPerBitmapByte*PtrSize)) != 0)
- runtime·throw("markspan: unaligned length");
- b = b - n/wordsPerBitmapByte + 1; // find first byte
- if(((uintptr)b%PtrSize) != 0)
- runtime·throw("markspan: unaligned pointer");
- for(i = 0; i != n; i += wordsPerBitmapByte*PtrSize, b += PtrSize)
- *(uintptr*)b = (uintptr)0x1111111111111111ULL; // bitBoundary+BitsDead
- return;
- }
-
- if(leftover)
- n++; // mark a boundary just past end of last block too
- step = size/(PtrSize*wordsPerBitmapByte);
- for(i = 0; i != n; i++, b -= step)
- *b = bitBoundary|(BitsDead<<2);
-}
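-
-// A worked view of the fast path above (assuming a 64-bit system, so PtrSize == 8
-// and wordsPerBitmapByte == 2): each bitmap byte holds two gcBits-wide nibbles,
-// one per heap word, and 0x11 is the byte whose two nibbles are both
-// bitBoundary|(BitsDead<<2), so one 8-byte store of 0x1111111111111111 initializes
-// the bitmap for 16 consecutive 8-byte blocks at once.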
-
-// unmark the span of memory at v of length n bytes.
-void
-runtime·unmarkspan(void *v, uintptr n)
-{
- uintptr off;
- byte *b;
-
- if((byte*)v+n > (byte*)runtime·mheap.arena_used || (byte*)v < runtime·mheap.arena_start)
- runtime·throw("markspan: bad pointer");
-
- off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset
- if((off % (PtrSize*wordsPerBitmapByte)) != 0)
- runtime·throw("markspan: unaligned pointer");
- b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
- n /= PtrSize;
- if(n%(PtrSize*wordsPerBitmapByte) != 0)
- runtime·throw("unmarkspan: unaligned length");
- // Okay to use non-atomic ops here, because we control
- // the entire span, and each bitmap word has bits for only
- // one span, so no other goroutines are changing these
- // bitmap words.
- n /= wordsPerBitmapByte;
- runtime·memclr(b - n + 1, n);
-}
-
-void
-runtime·MHeap_MapBits(MHeap *h)
-{
- // Caller has added extra mappings to the arena.
- // Add extra mappings of bitmap words as needed.
- // We allocate extra bitmap pieces in chunks of bitmapChunk.
- enum {
- bitmapChunk = 8192
- };
- uintptr n;
-
- n = (h->arena_used - h->arena_start) / (PtrSize*wordsPerBitmapByte);
- n = ROUND(n, bitmapChunk);
- n = ROUND(n, PhysPageSize);
- if(h->bitmap_mapped >= n)
- return;
-
- runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
- h->bitmap_mapped = n;
-}
-
-static bool
-getgcmaskcb(Stkframe *frame, void *ctxt)
-{
- Stkframe *frame0;
-
- frame0 = ctxt;
- if(frame->sp <= frame0->sp && frame0->sp < frame->varp) {
- *frame0 = *frame;
- return false;
- }
- return true;
-}
-
-// Returns GC type info for object p for testing.
-void
-runtime·getgcmask(byte *p, Type *t, byte **mask, uintptr *len)
-{
- Stkframe frame;
- uintptr i, n, off;
- byte *base, bits, shift, *b;
- bool (*cb)(Stkframe*, void*);
-
- *mask = nil;
- *len = 0;
-
- // data
- if(p >= runtime·data && p < runtime·edata) {
- n = ((PtrType*)t)->elem->size;
- *len = n/PtrSize;
- *mask = runtime·mallocgc(*len, nil, FlagNoScan);
- for(i = 0; i < n; i += PtrSize) {
- off = (p+i-runtime·data)/PtrSize;
- bits = (runtime·gcdatamask.bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer))&BitsMask;
- (*mask)[i/PtrSize] = bits;
- }
- return;
- }
- // bss
- if(p >= runtime·bss && p < runtime·ebss) {
- n = ((PtrType*)t)->elem->size;
- *len = n/PtrSize;
- *mask = runtime·mallocgc(*len, nil, FlagNoScan);
- for(i = 0; i < n; i += PtrSize) {
- off = (p+i-runtime·bss)/PtrSize;
- bits = (runtime·gcbssmask.bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer))&BitsMask;
- (*mask)[i/PtrSize] = bits;
- }
- return;
- }
- // heap
- if(runtime·mlookup(p, &base, &n, nil)) {
- *len = n/PtrSize;
- *mask = runtime·mallocgc(*len, nil, FlagNoScan);
- for(i = 0; i < n; i += PtrSize) {
- off = (uintptr*)(base+i) - (uintptr*)runtime·mheap.arena_start;
- b = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- bits = (*b >> (shift+2))&BitsMask;
- (*mask)[i/PtrSize] = bits;
- }
- return;
- }
- // stack
- frame.fn = nil;
- frame.sp = (uintptr)p;
- cb = getgcmaskcb;
- runtime·gentraceback(g->m->curg->sched.pc, g->m->curg->sched.sp, 0, g->m->curg, 0, nil, 1000, &cb, &frame, 0);
- if(frame.fn != nil) {
- Func *f;
- StackMap *stackmap;
- BitVector bv;
- uintptr size;
- uintptr targetpc;
- int32 pcdata;
-
- f = frame.fn;
- targetpc = frame.continpc;
- if(targetpc == 0)
- return;
- if(targetpc != f->entry)
- targetpc--;
- pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
- if(pcdata == -1)
- return;
- stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
- if(stackmap == nil || stackmap->n <= 0)
- return;
- bv = runtime·stackmapdata(stackmap, pcdata);
- size = bv.n/BitsPerPointer*PtrSize;
- n = ((PtrType*)t)->elem->size;
- *len = n/PtrSize;
- *mask = runtime·mallocgc(*len, nil, FlagNoScan);
- for(i = 0; i < n; i += PtrSize) {
- off = (p+i-(byte*)frame.varp+size)/PtrSize;
- bits = (bv.bytedata[off*BitsPerPointer/8] >> ((off*BitsPerPointer)%8))&BitsMask;
- (*mask)[i/PtrSize] = bits;
- }
- }
-}
-
-void runtime·gc_unixnanotime(int64 *now);
-
-int64
-runtime·unixnanotime(void)
-{
- int64 now;
-
- runtime·gc_unixnanotime(&now);
- return now;
-}
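getgcmask above extracts one 2-bit entry per heap word from the packed data/bss masks: entry i lives in byte i/PointersPerByte at bit position (i%PointersPerByte)*BitsPerPointer. A tiny standalone sketch of that lookup; the mask byte below is made up for illustration:

    package main

    import "fmt"

    const (
    	bitsPerPointer  = 2
    	pointersPerByte = 8 / bitsPerPointer
    	bitsMask        = 1<<bitsPerPointer - 1
    )

    // typeBits returns the 2-bit entry for word off in a packed mask, mirroring
    // (bytedata[off/PointersPerByte] >> ((off%PointersPerByte)*BitsPerPointer)) & BitsMask.
    func typeBits(bytedata []byte, off uintptr) byte {
    	return (bytedata[off/pointersPerByte] >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
    }

    func main() {
    	// One byte describing four words, low bits first:
    	// 0x89 = 10 00 10 01 = pointer, dead, pointer, scalar (word 3 down to word 0).
    	mask := []byte{0x89}
    	for off := uintptr(0); off < 4; off++ {
    		fmt.Println(off, typeBits(mask, off)) // prints 0 1, 1 2, 2 0, 3 2
    	}
    }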
diff --git a/src/runtime/mgc0.go b/src/runtime/mgc0.go
index dc4eec519..00e64c0ff 100644
--- a/src/runtime/mgc0.go
+++ b/src/runtime/mgc0.go
@@ -28,7 +28,7 @@ func gc_unixnanotime(now *int64) {
func freeOSMemory() {
gogc(2) // force GC and do eager sweep
- onM(scavenge_m)
+ systemstack(scavenge_m)
}
var poolcleanup func()
@@ -60,10 +60,8 @@ func clearpools() {
}
}
-func gosweepone() uintptr
-func gosweepdone() bool
-
func bgsweep() {
+ sweep.g = getg()
getg().issystem = true
for {
for gosweepone() != ^uintptr(0) {
@@ -105,7 +103,7 @@ func writebarrierptr_nostore(dst *uintptr, src uintptr) {
}
if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
- onM(func() { gothrow("bad pointer in write barrier") })
+ systemstack(func() { gothrow("bad pointer in write barrier") })
}
mp := acquirem()
@@ -114,13 +112,9 @@ func writebarrierptr_nostore(dst *uintptr, src uintptr) {
return
}
mp.inwb = true
- oldscalar0 := mp.scalararg[0]
- oldscalar1 := mp.scalararg[1]
- mp.scalararg[0] = uintptr(unsafe.Pointer(dst))
- mp.scalararg[1] = src
- onM_signalok(gcmarkwb_m)
- mp.scalararg[0] = oldscalar0
- mp.scalararg[1] = oldscalar1
+ systemstack(func() {
+ gcmarkwb_m(dst, src)
+ })
mp.inwb = false
releasem(mp)
}
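The rewrite in this hunk drops the old convention of smuggling arguments through per-M scratch slots (m.ptrarg/m.scalararg) around an argument-less onM call: with systemstack the arguments simply travel in a closure. A standalone sketch of the shape of that change, with stubs standing in for the real systemstack (which switches to the g0 stack before running fn) and for the write-barrier helper:

    package main

    import "fmt"

    // systemstack is only a stand-in here; the real runtime function runs fn
    // on the system (g0) stack.
    func systemstack(fn func()) { fn() }

    // gcmarkwb_m stands in for the real write-barrier helper; its body here is
    // purely illustrative.
    func gcmarkwb_m(dst *uintptr, src uintptr) { *dst = src }

    func writebarrierptr_nostore(dst *uintptr, src uintptr) {
    	// Arguments are captured by the closure instead of being parked in
    	// m.scalararg[0] and m.scalararg[1] around the call.
    	systemstack(func() {
    		gcmarkwb_m(dst, src)
    	})
    }

    func main() {
    	var slot uintptr
    	writebarrierptr_nostore(&slot, 42)
    	fmt.Println(slot) // 42
    }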
diff --git a/src/runtime/mgc0.h b/src/runtime/mgc0.h
index 519d7206e..dd0c46024 100644
--- a/src/runtime/mgc0.h
+++ b/src/runtime/mgc0.h
@@ -2,81 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Garbage collector (GC)
+// Used by cmd/gc.
enum {
- // Four bits per word (see #defines below).
gcBits = 4,
- wordsPerBitmapByte = 8/gcBits,
-
- // GC type info programs.
- // The programs make it possible to store type info required for GC in a compact form.
- // Most importantly, arrays take O(1) space instead of O(n).
- // The program grammar is:
- //
- // Program = {Block} "insEnd"
- // Block = Data | Array
- // Data = "insData" DataSize DataBlock
- // DataSize = int // size of the DataBlock in bit pairs, 1 byte
- // DataBlock = binary // dense GC mask (2 bits per word) of size ]DataSize/4[ bytes
- // Array = "insArray" ArrayLen Block "insArrayEnd"
- // ArrayLen = int // length of the array, 8 bytes (4 bytes for 32-bit arch)
- //
- // Each instruction (insData, insArray, etc) is 1 byte.
- // For example, for type struct { x []byte; y [20]struct{ z int; w *byte }; }
- // the program looks like this:
- //
- // insData 3 (BitsMultiWord BitsSlice BitsScalar)
- // insArray 20 insData 2 (BitsScalar BitsPointer) insArrayEnd insEnd
- //
- // Total size of the program is 17 bytes (13 bytes on 32-bits).
- // The corresponding GC mask would take 43 bytes (it would be repeated
- // because the type has an odd number of words).
+ BitsPerPointer = 2,
+ BitsDead = 0,
+ BitsScalar = 1,
+ BitsPointer = 2,
+ BitsMask = 3,
+ PointersPerByte = 8/BitsPerPointer,
insData = 1,
insArray,
insArrayEnd,
insEnd,
- // Pointer map
- BitsPerPointer = 2,
- BitsMask = (1<<BitsPerPointer)-1,
- PointersPerByte = 8/BitsPerPointer,
-
- // If you change these, also change scanblock.
- // scanblock does "if(bits == BitsScalar || bits == BitsDead)" as "if(bits <= BitsScalar)".
- BitsDead = 0,
- BitsScalar = 1, // 01
- BitsPointer = 2, // 10
- BitsCheckMarkXor = 1, // 10
- BitsScalarMarked = BitsScalar ^ BitsCheckMarkXor, // 00
- BitsPointerMarked = BitsPointer ^ BitsCheckMarkXor, // 11
-
- BitsMultiWord = 3,
- // BitsMultiWord will be set for the first word of a multi-word item.
- // When it is set, one of the following will be set for the second word.
- // NOT USED ANYMORE: BitsString = 0,
- // NOT USED ANYMORE: BitsSlice = 1,
- BitsIface = 2,
- BitsEface = 3,
-
// 64 bytes cover objects of size 1024/512 on 64/32 bits, respectively.
MaxGCMask = 65536, // TODO(rsc): change back to 64
};
-
-// Bits in per-word bitmap.
-// #defines because we shift the values beyond 32 bits.
-//
-// Each word in the bitmap describes wordsPerBitmapWord words
-// of heap memory. There are 4 bitmap bits dedicated to each heap word,
-// so on a 64-bit system there is one bitmap word per 16 heap words.
-//
-// The bitmap starts at mheap.arena_start and extends *backward* from
-// there. On a 64-bit system the off'th word in the arena is tracked by
-// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
-// the only difference is that the divisor is 8.)
-enum {
- bitBoundary = 1, // boundary of an object
- bitMarked = 2, // marked object
- bitMask = bitBoundary | bitMarked,
- bitPtrMask = BitsMask<<2,
-};
diff --git a/src/runtime/mgc1.go b/src/runtime/mgc1.go
new file mode 100644
index 000000000..04a5207e5
--- /dev/null
+++ b/src/runtime/mgc1.go
@@ -0,0 +1,80 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector (GC)
+
+package runtime
+
+const (
+ // Four bits per word (see #defines below).
+ gcBits = 4
+ wordsPerBitmapByte = 8 / gcBits
+)
+
+const (
+ // GC type info programs.
+ // The programs make it possible to store type info required for GC in a compact form.
+ // Most importantly, arrays take O(1) space instead of O(n).
+ // The program grammar is:
+ //
+ // Program = {Block} "insEnd"
+ // Block = Data | Array
+ // Data = "insData" DataSize DataBlock
+ // DataSize = int // size of the DataBlock in bit pairs, 1 byte
+ // DataBlock = binary // dense GC mask (2 bits per word) of size ]DataSize/4[ bytes
+ // Array = "insArray" ArrayLen Block "insArrayEnd"
+ // ArrayLen = int // length of the array, 8 bytes (4 bytes for 32-bit arch)
+ //
+ // Each instruction (insData, insArray, etc) is 1 byte.
+ // For example, for type struct { x []byte; y [20]struct{ z int; w *byte }; }
+ // the program looks like this:
+ //
+ // insData 3 (BitsPointer BitsScalar BitsScalar)
+ // insArray 20 insData 2 (BitsScalar BitsPointer) insArrayEnd insEnd
+ //
+ // Total size of the program is 17 bytes (13 bytes on 32-bits).
+ // The corresponding GC mask would take 43 bytes (it would be repeated
+ // because the type has an odd number of words).
+ insData = 1 + iota
+ insArray
+ insArrayEnd
+ insEnd
+)
+
+const (
+ // Pointer map
+ _BitsPerPointer = 2
+ _BitsMask = (1 << _BitsPerPointer) - 1
+ _PointersPerByte = 8 / _BitsPerPointer
+
+ // If you change these, also change scanblock.
+ // scanblock does "if(bits == BitsScalar || bits == BitsDead)" as "if(bits <= BitsScalar)".
+ _BitsDead = 0
+ _BitsScalar = 1 // 01
+ _BitsPointer = 2 // 10
+ _BitsCheckMarkXor = 1 // 10
+ _BitsScalarMarked = _BitsScalar ^ _BitsCheckMarkXor // 00
+ _BitsPointerMarked = _BitsPointer ^ _BitsCheckMarkXor // 11
+
+ // 64 bytes cover objects of size 1024/512 on 64/32 bits, respectively.
+ _MaxGCMask = 65536 // TODO(rsc): change back to 64
+)
+
+// Bits in per-word bitmap.
+// #defines because we shift the values beyond 32 bits.
+//
+// Each word in the bitmap describes wordsPerBitmapWord words
+// of heap memory. There are 4 bitmap bits dedicated to each heap word,
+// so on a 64-bit system there is one bitmap word per 16 heap words.
+//
+// The bitmap starts at mheap.arena_start and extends *backward* from
+// there. On a 64-bit system the off'th word in the arena is tracked by
+// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
+// the only difference is that the divisor is 8.)
+const (
+ bitBoundary = 1 // boundary of an object
+ bitMarked = 2 // marked object
+ bitMask = bitBoundary | bitMarked
+ bitPtrMask = _BitsMask << 2
+)
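The program grammar described in the comment block above can be checked by hand: for struct { x []byte; y [20]struct{ z int; w *byte } } the encoding is insData 3, one packed byte for the slice header, insArray with an 8-byte length, insData 2, one packed byte for the element, insArrayEnd, insEnd, which comes to 17 bytes on a 64-bit system. A standalone sketch that spells those bytes out; the constants mirror the ones above, while the packing helper is illustrative rather than the runtime's encoder:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    const (
    	insData     = 1
    	insArray    = 2
    	insArrayEnd = 3
    	insEnd      = 4

    	bitsScalar  = 1
    	bitsPointer = 2
    )

    // packBits packs 2-bit type entries into bytes, low bits first.
    func packBits(bits ...byte) []byte {
    	out := make([]byte, (len(bits)+3)/4)
    	for i, b := range bits {
    		out[i/4] |= b << uint((i%4)*2)
    	}
    	return out
    }

    func main() {
    	var prog []byte

    	// x []byte: three words (pointer, len, cap).
    	prog = append(prog, insData, 3)
    	prog = append(prog, packBits(bitsPointer, bitsScalar, bitsScalar)...)

    	// y [20]struct{ z int; w *byte }: an array of 20 two-word elements.
    	prog = append(prog, insArray)
    	var n [8]byte // ArrayLen is 8 bytes on a 64-bit system
    	binary.LittleEndian.PutUint64(n[:], 20)
    	prog = append(prog, n[:]...)
    	prog = append(prog, insData, 2)
    	prog = append(prog, packBits(bitsScalar, bitsPointer)...)
    	prog = append(prog, insArrayEnd, insEnd)

    	fmt.Println(len(prog)) // 17, matching the comment above
    }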
diff --git a/src/runtime/mheap.c b/src/runtime/mheap.c
deleted file mode 100644
index bb203d5ce..000000000
--- a/src/runtime/mheap.c
+++ /dev/null
@@ -1,889 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Page heap.
-//
-// See malloc.h for overview.
-//
-// When a MSpan is in the heap free list, state == MSpanFree
-// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
-//
-// When a MSpan is allocated, state == MSpanInUse or MSpanStack
-// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-
-static MSpan *MHeap_AllocSpanLocked(MHeap*, uintptr);
-static void MHeap_FreeSpanLocked(MHeap*, MSpan*, bool, bool);
-static bool MHeap_Grow(MHeap*, uintptr);
-static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
-static MSpan *BestFit(MSpan*, uintptr, MSpan*);
-
-static void
-RecordSpan(void *vh, byte *p)
-{
- MHeap *h;
- MSpan *s;
- MSpan **all;
- uint32 cap;
-
- h = vh;
- s = (MSpan*)p;
- if(h->nspan >= h->nspancap) {
- cap = 64*1024/sizeof(all[0]);
- if(cap < h->nspancap*3/2)
- cap = h->nspancap*3/2;
- all = (MSpan**)runtime·sysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
- if(all == nil)
- runtime·throw("runtime: cannot allocate memory");
- if(h->allspans) {
- runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
- // Don't free the old array if it's referenced by sweep.
- // See the comment in mgc0.c.
- if(h->allspans != runtime·mheap.gcspans)
- runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
- }
- h->allspans = all;
- h->nspancap = cap;
- }
- h->allspans[h->nspan++] = s;
-}
-
-// Initialize the heap; fetch memory using alloc.
-void
-runtime·MHeap_Init(MHeap *h)
-{
- uint32 i;
-
- runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
- runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
- runtime·FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
- runtime·FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
- // h->mapcache needs no init
- for(i=0; i<nelem(h->free); i++) {
- runtime·MSpanList_Init(&h->free[i]);
- runtime·MSpanList_Init(&h->busy[i]);
- }
- runtime·MSpanList_Init(&h->freelarge);
- runtime·MSpanList_Init(&h->busylarge);
- for(i=0; i<nelem(h->central); i++)
- runtime·MCentral_Init(&h->central[i].mcentral, i);
-}
-
-void
-runtime·MHeap_MapSpans(MHeap *h)
-{
- uintptr n;
-
- // Map spans array, PageSize at a time.
- n = (uintptr)h->arena_used;
- n -= (uintptr)h->arena_start;
- n = n / PageSize * sizeof(h->spans[0]);
- n = ROUND(n, PhysPageSize);
- if(h->spans_mapped >= n)
- return;
- runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
- h->spans_mapped = n;
-}
-
-// Sweeps spans in list until it reclaims at least npages into the heap.
-// Returns the actual number of pages reclaimed.
-static uintptr
-MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
-{
- MSpan *s;
- uintptr n;
- uint32 sg;
-
- n = 0;
- sg = runtime·mheap.sweepgen;
-retry:
- for(s = list->next; s != list; s = s->next) {
- if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
- runtime·MSpanList_Remove(s);
- // swept spans are at the end of the list
- runtime·MSpanList_InsertBack(list, s);
- runtime·unlock(&h->lock);
- n += runtime·MSpan_Sweep(s, false);
- runtime·lock(&h->lock);
- if(n >= npages)
- return n;
- // the span could have been moved elsewhere
- goto retry;
- }
- if(s->sweepgen == sg-1) {
- // the span is being swept by the background sweeper, skip
- continue;
- }
- // already swept empty span,
- // all subsequent ones must also be either swept or in the process of being swept
- break;
- }
- return n;
-}
-
-// Sweeps and reclaims at least npage pages into heap.
-// Called before allocating npage pages.
-static void
-MHeap_Reclaim(MHeap *h, uintptr npage)
-{
- uintptr reclaimed, n;
-
- // First try to sweep busy spans with large objects of size >= npage,
- // this has good chances of reclaiming the necessary space.
- for(n=npage; n < nelem(h->busy); n++) {
- if(MHeap_ReclaimList(h, &h->busy[n], npage))
- return; // Bingo!
- }
-
- // Then -- even larger objects.
- if(MHeap_ReclaimList(h, &h->busylarge, npage))
- return; // Bingo!
-
- // Now try smaller objects.
- // One such object is not enough, so we need to reclaim several of them.
- reclaimed = 0;
- for(n=0; n < npage && n < nelem(h->busy); n++) {
- reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
- if(reclaimed >= npage)
- return;
- }
-
- // Now sweep everything that is not yet swept.
- runtime·unlock(&h->lock);
- for(;;) {
- n = runtime·sweepone();
- if(n == -1) // all spans are swept
- break;
- reclaimed += n;
- if(reclaimed >= npage)
- break;
- }
- runtime·lock(&h->lock);
-}
-
-// Allocate a new span of npage pages from the heap for GC'd memory
-// and record its size class in the HeapMap and HeapMapCache.
-static MSpan*
-mheap_alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large)
-{
- MSpan *s;
-
- if(g != g->m->g0)
- runtime·throw("mheap_alloc not on M stack");
- runtime·lock(&h->lock);
-
- // To prevent excessive heap growth, before allocating n pages
- // we need to sweep and reclaim at least n pages.
- if(!h->sweepdone)
- MHeap_Reclaim(h, npage);
-
- // transfer stats from cache to global
- mstats.heap_alloc += g->m->mcache->local_cachealloc;
- g->m->mcache->local_cachealloc = 0;
- mstats.tinyallocs += g->m->mcache->local_tinyallocs;
- g->m->mcache->local_tinyallocs = 0;
-
- s = MHeap_AllocSpanLocked(h, npage);
- if(s != nil) {
- // Record span info, because gc needs to be
- // able to map interior pointer to containing span.
- runtime·atomicstore(&s->sweepgen, h->sweepgen);
- s->state = MSpanInUse;
- s->freelist = nil;
- s->ref = 0;
- s->sizeclass = sizeclass;
- s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);
-
- // update stats, sweep lists
- if(large) {
- mstats.heap_objects++;
- mstats.heap_alloc += npage<<PageShift;
- // Swept spans are at the end of lists.
- if(s->npages < nelem(h->free))
- runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
- else
- runtime·MSpanList_InsertBack(&h->busylarge, s);
- }
- }
- runtime·unlock(&h->lock);
- return s;
-}
-
-static void
-mheap_alloc_m(G *gp)
-{
- MHeap *h;
- MSpan *s;
-
- h = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- s = mheap_alloc(h, g->m->scalararg[0], g->m->scalararg[1], g->m->scalararg[2]);
- g->m->ptrarg[0] = s;
-
- runtime·gogo(&gp->sched);
-}
-
-MSpan*
-runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
-{
- MSpan *s;
- void (*fn)(G*);
-
- // Don't do any operations that lock the heap on the G stack.
- // It might trigger stack growth, and the stack growth code needs
- // to be able to allocate heap.
- if(g == g->m->g0) {
- s = mheap_alloc(h, npage, sizeclass, large);
- } else {
- g->m->ptrarg[0] = h;
- g->m->scalararg[0] = npage;
- g->m->scalararg[1] = sizeclass;
- g->m->scalararg[2] = large;
- fn = mheap_alloc_m;
- runtime·mcall(&fn);
- s = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- }
- if(s != nil) {
- if(needzero && s->needzero)
- runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
- s->needzero = 0;
- }
- return s;
-}
-
-MSpan*
-runtime·MHeap_AllocStack(MHeap *h, uintptr npage)
-{
- MSpan *s;
-
- if(g != g->m->g0)
- runtime·throw("mheap_allocstack not on M stack");
- runtime·lock(&h->lock);
- s = MHeap_AllocSpanLocked(h, npage);
- if(s != nil) {
- s->state = MSpanStack;
- s->freelist = nil;
- s->ref = 0;
- mstats.stacks_inuse += s->npages<<PageShift;
- }
- runtime·unlock(&h->lock);
- return s;
-}
-
-// Allocates a span of the given size. h must be locked.
-// The returned span has been removed from the
-// free list, but its state is still MSpanFree.
-static MSpan*
-MHeap_AllocSpanLocked(MHeap *h, uintptr npage)
-{
- uintptr n;
- MSpan *s, *t;
- pageID p;
-
- // Try in fixed-size lists up to max.
- for(n=npage; n < nelem(h->free); n++) {
- if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
- s = h->free[n].next;
- goto HaveSpan;
- }
- }
-
- // Best fit in list of large spans.
- if((s = MHeap_AllocLarge(h, npage)) == nil) {
- if(!MHeap_Grow(h, npage))
- return nil;
- if((s = MHeap_AllocLarge(h, npage)) == nil)
- return nil;
- }
-
-HaveSpan:
- // Mark span in use.
- if(s->state != MSpanFree)
- runtime·throw("MHeap_AllocLocked - MSpan not free");
- if(s->npages < npage)
- runtime·throw("MHeap_AllocLocked - bad npages");
- runtime·MSpanList_Remove(s);
- if(s->next != nil || s->prev != nil)
- runtime·throw("still in list");
- if(s->npreleased > 0) {
- runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
- mstats.heap_released -= s->npreleased<<PageShift;
- s->npreleased = 0;
- }
-
- if(s->npages > npage) {
- // Trim extra and put it back in the heap.
- t = runtime·FixAlloc_Alloc(&h->spanalloc);
- runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
- s->npages = npage;
- p = t->start;
- p -= ((uintptr)h->arena_start>>PageShift);
- if(p > 0)
- h->spans[p-1] = s;
- h->spans[p] = t;
- h->spans[p+t->npages-1] = t;
- t->needzero = s->needzero;
- s->state = MSpanStack; // prevent coalescing with s
- t->state = MSpanStack;
- MHeap_FreeSpanLocked(h, t, false, false);
- t->unusedsince = s->unusedsince; // preserve age (TODO: wrong: t is possibly merged and/or deallocated at this point)
- s->state = MSpanFree;
- }
- s->unusedsince = 0;
-
- p = s->start;
- p -= ((uintptr)h->arena_start>>PageShift);
- for(n=0; n<npage; n++)
- h->spans[p+n] = s;
-
- mstats.heap_inuse += npage<<PageShift;
- mstats.heap_idle -= npage<<PageShift;
-
- //runtime·printf("spanalloc %p\n", s->start << PageShift);
- if(s->next != nil || s->prev != nil)
- runtime·throw("still in list");
- return s;
-}
-
-// Allocate a span of exactly npage pages from the list of large spans.
-static MSpan*
-MHeap_AllocLarge(MHeap *h, uintptr npage)
-{
- return BestFit(&h->freelarge, npage, nil);
-}
-
-// Search list for smallest span with >= npage pages.
-// If there are multiple smallest spans, take the one
-// with the earliest starting address.
-static MSpan*
-BestFit(MSpan *list, uintptr npage, MSpan *best)
-{
- MSpan *s;
-
- for(s=list->next; s != list; s=s->next) {
- if(s->npages < npage)
- continue;
- if(best == nil
- || s->npages < best->npages
- || (s->npages == best->npages && s->start < best->start))
- best = s;
- }
- return best;
-}
-
-// Try to add at least npage pages of memory to the heap,
-// returning whether it worked.
-static bool
-MHeap_Grow(MHeap *h, uintptr npage)
-{
- uintptr ask;
- void *v;
- MSpan *s;
- pageID p;
-
- // Ask for a big chunk, to reduce the number of mappings
- // the operating system needs to track; also amortizes
- // the overhead of an operating system mapping.
- // Allocate a multiple of 64kB.
- npage = ROUND(npage, (64<<10)/PageSize);
- ask = npage<<PageShift;
- if(ask < HeapAllocChunk)
- ask = HeapAllocChunk;
-
- v = runtime·MHeap_SysAlloc(h, ask);
- if(v == nil) {
- if(ask > (npage<<PageShift)) {
- ask = npage<<PageShift;
- v = runtime·MHeap_SysAlloc(h, ask);
- }
- if(v == nil) {
- runtime·printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
- return false;
- }
- }
-
- // Create a fake "in use" span and free it, so that the
- // right coalescing happens.
- s = runtime·FixAlloc_Alloc(&h->spanalloc);
- runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
- p = s->start;
- p -= ((uintptr)h->arena_start>>PageShift);
- h->spans[p] = s;
- h->spans[p + s->npages - 1] = s;
- runtime·atomicstore(&s->sweepgen, h->sweepgen);
- s->state = MSpanInUse;
- MHeap_FreeSpanLocked(h, s, false, true);
- return true;
-}
-
-// Look up the span at the given address.
-// Address is guaranteed to be in map
-// and is guaranteed to be start or end of span.
-MSpan*
-runtime·MHeap_Lookup(MHeap *h, void *v)
-{
- uintptr p;
-
- p = (uintptr)v;
- p -= (uintptr)h->arena_start;
- return h->spans[p >> PageShift];
-}
-
-// Look up the span at the given address.
-// Address is *not* guaranteed to be in map
-// and may be anywhere in the span.
-// Map entries for the middle of a span are only
-// valid for allocated spans. Free spans may have
-// other garbage in their middles, so we have to
-// check for that.
-MSpan*
-runtime·MHeap_LookupMaybe(MHeap *h, void *v)
-{
- MSpan *s;
- pageID p, q;
-
- if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
- return nil;
- p = (uintptr)v>>PageShift;
- q = p;
- q -= (uintptr)h->arena_start >> PageShift;
- s = h->spans[q];
- if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse)
- return nil;
- return s;
-}
-
-// Free the span back into the heap.
-static void
-mheap_free(MHeap *h, MSpan *s, int32 acct)
-{
- if(g != g->m->g0)
- runtime·throw("mheap_free not on M stack");
- runtime·lock(&h->lock);
- mstats.heap_alloc += g->m->mcache->local_cachealloc;
- g->m->mcache->local_cachealloc = 0;
- mstats.tinyallocs += g->m->mcache->local_tinyallocs;
- g->m->mcache->local_tinyallocs = 0;
- if(acct) {
- mstats.heap_alloc -= s->npages<<PageShift;
- mstats.heap_objects--;
- }
- MHeap_FreeSpanLocked(h, s, true, true);
- runtime·unlock(&h->lock);
-}
-
-static void
-mheap_free_m(G *gp)
-{
- MHeap *h;
- MSpan *s;
-
- h = g->m->ptrarg[0];
- s = g->m->ptrarg[1];
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
- mheap_free(h, s, g->m->scalararg[0]);
- runtime·gogo(&gp->sched);
-}
-
-void
-runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
-{
- void (*fn)(G*);
-
- if(g == g->m->g0) {
- mheap_free(h, s, acct);
- } else {
- g->m->ptrarg[0] = h;
- g->m->ptrarg[1] = s;
- g->m->scalararg[0] = acct;
- fn = mheap_free_m;
- runtime·mcall(&fn);
- }
-}
-
-void
-runtime·MHeap_FreeStack(MHeap *h, MSpan *s)
-{
- if(g != g->m->g0)
- runtime·throw("mheap_freestack not on M stack");
- s->needzero = 1;
- runtime·lock(&h->lock);
- mstats.stacks_inuse -= s->npages<<PageShift;
- MHeap_FreeSpanLocked(h, s, true, true);
- runtime·unlock(&h->lock);
-}
-
-static void
-MHeap_FreeSpanLocked(MHeap *h, MSpan *s, bool acctinuse, bool acctidle)
-{
- MSpan *t;
- pageID p;
-
- switch(s->state) {
- case MSpanStack:
- if(s->ref != 0)
- runtime·throw("MHeap_FreeSpanLocked - invalid stack free");
- break;
- case MSpanInUse:
- if(s->ref != 0 || s->sweepgen != h->sweepgen) {
- runtime·printf("MHeap_FreeSpanLocked - span %p ptr %p ref %d sweepgen %d/%d\n",
- s, s->start<<PageShift, s->ref, s->sweepgen, h->sweepgen);
- runtime·throw("MHeap_FreeSpanLocked - invalid free");
- }
- break;
- default:
- runtime·throw("MHeap_FreeSpanLocked - invalid span state");
- break;
- }
- if(acctinuse)
- mstats.heap_inuse -= s->npages<<PageShift;
- if(acctidle)
- mstats.heap_idle += s->npages<<PageShift;
- s->state = MSpanFree;
- runtime·MSpanList_Remove(s);
- // Stamp newly unused spans. The scavenger will use that
- // info to potentially give back some pages to the OS.
- s->unusedsince = runtime·nanotime();
- s->npreleased = 0;
-
- // Coalesce with earlier, later spans.
- p = s->start;
- p -= (uintptr)h->arena_start >> PageShift;
- if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse && t->state != MSpanStack) {
- s->start = t->start;
- s->npages += t->npages;
- s->npreleased = t->npreleased; // absorb released pages
- s->needzero |= t->needzero;
- p -= t->npages;
- h->spans[p] = s;
- runtime·MSpanList_Remove(t);
- t->state = MSpanDead;
- runtime·FixAlloc_Free(&h->spanalloc, t);
- }
- if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse && t->state != MSpanStack) {
- s->npages += t->npages;
- s->npreleased += t->npreleased;
- s->needzero |= t->needzero;
- h->spans[p + s->npages - 1] = s;
- runtime·MSpanList_Remove(t);
- t->state = MSpanDead;
- runtime·FixAlloc_Free(&h->spanalloc, t);
- }
-
- // Insert s into appropriate list.
- if(s->npages < nelem(h->free))
- runtime·MSpanList_Insert(&h->free[s->npages], s);
- else
- runtime·MSpanList_Insert(&h->freelarge, s);
-}
-
-static uintptr
-scavengelist(MSpan *list, uint64 now, uint64 limit)
-{
- uintptr released, sumreleased;
- MSpan *s;
-
- if(runtime·MSpanList_IsEmpty(list))
- return 0;
-
- sumreleased = 0;
- for(s=list->next; s != list; s=s->next) {
- if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
- released = (s->npages - s->npreleased) << PageShift;
- mstats.heap_released += released;
- sumreleased += released;
- s->npreleased = s->npages;
- runtime·SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
- }
- }
- return sumreleased;
-}
-
-void
-runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit)
-{
- uint32 i;
- uintptr sumreleased;
- MHeap *h;
-
- h = &runtime·mheap;
- runtime·lock(&h->lock);
- sumreleased = 0;
- for(i=0; i < nelem(h->free); i++)
- sumreleased += scavengelist(&h->free[i], now, limit);
- sumreleased += scavengelist(&h->freelarge, now, limit);
- runtime·unlock(&h->lock);
-
- if(runtime·debug.gctrace > 0) {
- if(sumreleased > 0)
- runtime·printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
- // TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
- // But we can't call ReadMemStats on g0 holding locks.
- runtime·printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
- k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
- mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
- }
-}
-
-void
-runtime·scavenge_m(void)
-{
- runtime·MHeap_Scavenge(-1, ~(uintptr)0, 0);
-}
-
-// Initialize a new span with the given start and npages.
-void
-runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages)
-{
- span->next = nil;
- span->prev = nil;
- span->start = start;
- span->npages = npages;
- span->freelist = nil;
- span->ref = 0;
- span->sizeclass = 0;
- span->incache = false;
- span->elemsize = 0;
- span->state = MSpanDead;
- span->unusedsince = 0;
- span->npreleased = 0;
- span->specialLock.key = 0;
- span->specials = nil;
- span->needzero = 0;
-}
-
-// Initialize an empty doubly-linked list.
-void
-runtime·MSpanList_Init(MSpan *list)
-{
- list->state = MSpanListHead;
- list->next = list;
- list->prev = list;
-}
-
-void
-runtime·MSpanList_Remove(MSpan *span)
-{
- if(span->prev == nil && span->next == nil)
- return;
- span->prev->next = span->next;
- span->next->prev = span->prev;
- span->prev = nil;
- span->next = nil;
-}
-
-bool
-runtime·MSpanList_IsEmpty(MSpan *list)
-{
- return list->next == list;
-}
-
-void
-runtime·MSpanList_Insert(MSpan *list, MSpan *span)
-{
- if(span->next != nil || span->prev != nil) {
- runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
- runtime·throw("MSpanList_Insert");
- }
- span->next = list->next;
- span->prev = list;
- span->next->prev = span;
- span->prev->next = span;
-}
-
-void
-runtime·MSpanList_InsertBack(MSpan *list, MSpan *span)
-{
- if(span->next != nil || span->prev != nil) {
- runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
- runtime·throw("MSpanList_Insert");
- }
- span->next = list;
- span->prev = list->prev;
- span->next->prev = span;
- span->prev->next = span;
-}
-
-// Adds the special record s to the list of special records for
-// the object p. All fields of s should be filled in except for
-// offset & next, which this routine will fill in.
-// Returns true if the special was successfully added, false otherwise.
-// (The add will fail only if a record with the same p and s->kind
-// already exists.)
-static bool
-addspecial(void *p, Special *s)
-{
- MSpan *span;
- Special **t, *x;
- uintptr offset;
- byte kind;
-
- span = runtime·MHeap_LookupMaybe(&runtime·mheap, p);
- if(span == nil)
- runtime·throw("addspecial on invalid pointer");
-
- // Ensure that the span is swept.
- // GC accesses specials list w/o locks. And it's just much safer.
- g->m->locks++;
- runtime·MSpan_EnsureSwept(span);
-
- offset = (uintptr)p - (span->start << PageShift);
- kind = s->kind;
-
- runtime·lock(&span->specialLock);
-
- // Find splice point, check for existing record.
- t = &span->specials;
- while((x = *t) != nil) {
- if(offset == x->offset && kind == x->kind) {
- runtime·unlock(&span->specialLock);
- g->m->locks--;
- return false; // already exists
- }
- if(offset < x->offset || (offset == x->offset && kind < x->kind))
- break;
- t = &x->next;
- }
- // Splice in record, fill in offset.
- s->offset = offset;
- s->next = x;
- *t = s;
- runtime·unlock(&span->specialLock);
- g->m->locks--;
- return true;
-}
-
-// Removes the Special record of the given kind for the object p.
-// Returns the record if the record existed, nil otherwise.
-// The caller must FixAlloc_Free the result.
-static Special*
-removespecial(void *p, byte kind)
-{
- MSpan *span;
- Special *s, **t;
- uintptr offset;
-
- span = runtime·MHeap_LookupMaybe(&runtime·mheap, p);
- if(span == nil)
- runtime·throw("removespecial on invalid pointer");
-
- // Ensure that the span is swept.
- // GC accesses specials list w/o locks. And it's just much safer.
- g->m->locks++;
- runtime·MSpan_EnsureSwept(span);
-
- offset = (uintptr)p - (span->start << PageShift);
-
- runtime·lock(&span->specialLock);
- t = &span->specials;
- while((s = *t) != nil) {
- // This function is used for finalizers only, so we don't check for
- // "interior" specials (p must be exactly equal to s->offset).
- if(offset == s->offset && kind == s->kind) {
- *t = s->next;
- runtime·unlock(&span->specialLock);
- g->m->locks--;
- return s;
- }
- t = &s->next;
- }
- runtime·unlock(&span->specialLock);
- g->m->locks--;
- return nil;
-}
-
-// Adds a finalizer to the object p. Returns true if it succeeded.
-bool
-runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot)
-{
- SpecialFinalizer *s;
-
- runtime·lock(&runtime·mheap.speciallock);
- s = runtime·FixAlloc_Alloc(&runtime·mheap.specialfinalizeralloc);
- runtime·unlock(&runtime·mheap.speciallock);
- s->special.kind = KindSpecialFinalizer;
- s->fn = f;
- s->nret = nret;
- s->fint = fint;
- s->ot = ot;
- if(addspecial(p, &s->special))
- return true;
-
- // There was an old finalizer
- runtime·lock(&runtime·mheap.speciallock);
- runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, s);
- runtime·unlock(&runtime·mheap.speciallock);
- return false;
-}
-
-// Removes the finalizer (if any) from the object p.
-void
-runtime·removefinalizer(void *p)
-{
- SpecialFinalizer *s;
-
- s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
- if(s == nil)
- return; // there wasn't a finalizer to remove
- runtime·lock(&runtime·mheap.speciallock);
- runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, s);
- runtime·unlock(&runtime·mheap.speciallock);
-}
-
-// Set the heap profile bucket associated with addr to b.
-void
-runtime·setprofilebucket_m(void)
-{
- void *p;
- Bucket *b;
- SpecialProfile *s;
-
- p = g->m->ptrarg[0];
- b = g->m->ptrarg[1];
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
-
- runtime·lock(&runtime·mheap.speciallock);
- s = runtime·FixAlloc_Alloc(&runtime·mheap.specialprofilealloc);
- runtime·unlock(&runtime·mheap.speciallock);
- s->special.kind = KindSpecialProfile;
- s->b = b;
- if(!addspecial(p, &s->special))
- runtime·throw("setprofilebucket: profile already set");
-}
-
-// Do whatever cleanup needs to be done to deallocate s. It has
-// already been unlinked from the MSpan specials list.
-// Returns true if we should keep working on deallocating p.
-bool
-runtime·freespecial(Special *s, void *p, uintptr size, bool freed)
-{
- SpecialFinalizer *sf;
- SpecialProfile *sp;
-
- switch(s->kind) {
- case KindSpecialFinalizer:
- sf = (SpecialFinalizer*)s;
- runtime·queuefinalizer(p, sf->fn, sf->nret, sf->fint, sf->ot);
- runtime·lock(&runtime·mheap.speciallock);
- runtime·FixAlloc_Free(&runtime·mheap.specialfinalizeralloc, sf);
- runtime·unlock(&runtime·mheap.speciallock);
- return false; // don't free p until finalizer is done
- case KindSpecialProfile:
- sp = (SpecialProfile*)s;
- runtime·mProf_Free(sp->b, size, freed);
- runtime·lock(&runtime·mheap.speciallock);
- runtime·FixAlloc_Free(&runtime·mheap.specialprofilealloc, sp);
- runtime·unlock(&runtime·mheap.speciallock);
- return true;
- default:
- runtime·throw("bad special kind");
- return true;
- }
-}
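Several functions in this file and its Go replacement lean on ROUND / round to align a size upward: MHeap_MapSpans and MHeap_MapBits round mapped sizes up to a physical page, and MHeap_Grow rounds a request up to a 64kB multiple of pages. The helper presumably reduces to the usual power-of-two trick; a standalone sketch under that assumption (page sizes are powers of two, so it holds for these callers):

    package main

    import "fmt"

    // round returns n rounded up to a multiple of a; a must be a power of two.
    func round(n, a uintptr) uintptr {
    	return (n + a - 1) &^ (a - 1)
    }

    func main() {
    	const pageSize = 8192 // 8kB runtime pages
    	// MHeap_Grow-style request: round a page count up to 64kB worth of pages.
    	fmt.Println(round(3, (64<<10)/pageSize)) // 8 pages = 64kB
    	// MapSpans/MapBits-style request: round a byte count up to a physical page.
    	fmt.Println(round(12345, 4096)) // 16384
    }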
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
new file mode 100644
index 000000000..fedcd69c5
--- /dev/null
+++ b/src/runtime/mheap.go
@@ -0,0 +1,785 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page heap.
+//
+// See malloc.h for overview.
+//
+// When a MSpan is in the heap free list, state == MSpanFree
+// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
+//
+// When a MSpan is allocated, state == MSpanInUse or MSpanStack
+// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
+
+package runtime
+
+import "unsafe"
+
+var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
+var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
+
+func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
+ h := (*mheap)(vh)
+ s := (*mspan)(p)
+ if len(h_allspans) >= cap(h_allspans) {
+ n := 64 * 1024 / ptrSize
+ if n < cap(h_allspans)*3/2 {
+ n = cap(h_allspans) * 3 / 2
+ }
+ var new []*mspan
+ sp := (*slice)(unsafe.Pointer(&new))
+ sp.array = (*byte)(sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys))
+ if sp.array == nil {
+ gothrow("runtime: cannot allocate memory")
+ }
+ sp.len = uint(len(h_allspans))
+ sp.cap = uint(n)
+ if len(h_allspans) > 0 {
+ copy(new, h_allspans)
+ // Don't free the old array if it's referenced by sweep.
+ // See the comment in mgc0.c.
+ if h.allspans != mheap_.gcspans {
+ sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*ptrSize, &memstats.other_sys)
+ }
+ }
+ h_allspans = new
+ h.allspans = (**mspan)(unsafe.Pointer(sp.array))
+ }
+ h_allspans = append(h_allspans, s)
+ h.nspan = uint32(len(h_allspans))
+}
+
+// Initialize the heap.
+func mHeap_Init(h *mheap, spans_size uintptr) {
+ fixAlloc_Init(&h.spanalloc, unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
+ fixAlloc_Init(&h.cachealloc, unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
+ fixAlloc_Init(&h.specialfinalizeralloc, unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
+ fixAlloc_Init(&h.specialprofilealloc, unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
+
+ // h->mapcache needs no init
+ for i := range h.free {
+ mSpanList_Init(&h.free[i])
+ mSpanList_Init(&h.busy[i])
+ }
+
+ mSpanList_Init(&h.freelarge)
+ mSpanList_Init(&h.busylarge)
+ for i := range h.central {
+ mCentral_Init(&h.central[i].mcentral, int32(i))
+ }
+
+ sp := (*slice)(unsafe.Pointer(&h_spans))
+ sp.array = (*byte)(unsafe.Pointer(h.spans))
+ sp.len = uint(spans_size / ptrSize)
+ sp.cap = uint(spans_size / ptrSize)
+}
+
+func mHeap_MapSpans(h *mheap) {
+ // Map spans array, PageSize at a time.
+ n := uintptr(unsafe.Pointer(h.arena_used))
+ n -= uintptr(unsafe.Pointer(h.arena_start))
+ n = n / _PageSize * ptrSize
+ n = round(n, _PhysPageSize)
+ if h.spans_mapped >= n {
+ return
+ }
+ sysMap(add(unsafe.Pointer(h.spans), h.spans_mapped), n-h.spans_mapped, h.arena_reserved, &memstats.other_sys)
+ h.spans_mapped = n
+}
+
+// Sweeps spans in list until it reclaims at least npages into the heap.
+// Returns the actual number of pages reclaimed.
+func mHeap_ReclaimList(h *mheap, list *mspan, npages uintptr) uintptr {
+ n := uintptr(0)
+ sg := mheap_.sweepgen
+retry:
+ for s := list.next; s != list; s = s.next {
+ if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
+ mSpanList_Remove(s)
+ // swept spans are at the end of the list
+ mSpanList_InsertBack(list, s)
+ unlock(&h.lock)
+ if mSpan_Sweep(s, false) {
+ // TODO(rsc,dvyukov): This is probably wrong.
+ // It is undercounting the number of pages reclaimed.
+ // See golang.org/issue/9048.
+ // Note that if we want to add the true count of s's pages,
+ // we must record that before calling mSpan_Sweep,
+ // because if mSpan_Sweep returns true the span has
+ // freed back to the heap, so we can no longer use it.
+ n++
+ }
+ lock(&h.lock)
+ if n >= npages {
+ return n
+ }
+ // the span could have been moved elsewhere
+ goto retry
+ }
+ if s.sweepgen == sg-1 {
+ // the span is being swept by the background sweeper, skip
+ continue
+ }
+ // already swept empty span,
+ // all subsequent ones must also be either swept or in the process of being swept
+ break
+ }
+ return n
+}
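mHeap_ReclaimList above encodes the sweep state of a span in the distance between s.sweepgen and the heap's sweepgen: two behind means the span still needs sweeping, one behind means someone is sweeping it right now, equal means it is swept, and the cas from sg-2 to sg-1 is how a sweeper claims a span. A small sketch of that convention as a classifier, purely for illustration:

    package main

    import "fmt"

    // sweepState classifies a span by comparing its sweepgen with the heap's,
    // following the convention used by mHeap_ReclaimList above.
    func sweepState(spanGen, heapGen uint32) string {
    	switch spanGen {
    	case heapGen - 2:
    		return "needs sweeping"
    	case heapGen - 1:
    		return "being swept"
    	case heapGen:
    		return "swept"
    	}
    	return "unexpected"
    }

    func main() {
    	const heapGen = 6
    	for _, g := range []uint32{4, 5, 6} {
    		fmt.Println(g, sweepState(g, heapGen))
    	}
    }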
+
+// Sweeps and reclaims at least npage pages into heap.
+// Called before allocating npage pages.
+func mHeap_Reclaim(h *mheap, npage uintptr) {
+ // First try to sweep busy spans with large objects of size >= npage,
+ // this has good chances of reclaiming the necessary space.
+ for i := int(npage); i < len(h.busy); i++ {
+ if mHeap_ReclaimList(h, &h.busy[i], npage) != 0 {
+ return // Bingo!
+ }
+ }
+
+ // Then -- even larger objects.
+ if mHeap_ReclaimList(h, &h.busylarge, npage) != 0 {
+ return // Bingo!
+ }
+
+ // Now try smaller objects.
+ // One such object is not enough, so we need to reclaim several of them.
+ reclaimed := uintptr(0)
+ for i := 0; i < int(npage) && i < len(h.busy); i++ {
+ reclaimed += mHeap_ReclaimList(h, &h.busy[i], npage-reclaimed)
+ if reclaimed >= npage {
+ return
+ }
+ }
+
+ // Now sweep everything that is not yet swept.
+ unlock(&h.lock)
+ for {
+ n := sweepone()
+ if n == ^uintptr(0) { // all spans are swept
+ break
+ }
+ reclaimed += n
+ if reclaimed >= npage {
+ break
+ }
+ }
+ lock(&h.lock)
+}
+
+// Allocate a new span of npage pages from the heap for GC'd memory
+// and record its size class in the HeapMap and HeapMapCache.
+func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
+ _g_ := getg()
+ if _g_ != _g_.m.g0 {
+ gothrow("_mheap_alloc not on g0 stack")
+ }
+ lock(&h.lock)
+
+ // To prevent excessive heap growth, before allocating n pages
+ // we need to sweep and reclaim at least n pages.
+ if h.sweepdone == 0 {
+ mHeap_Reclaim(h, npage)
+ }
+
+ // transfer stats from cache to global
+ memstats.heap_alloc += uint64(_g_.m.mcache.local_cachealloc)
+ _g_.m.mcache.local_cachealloc = 0
+ memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
+ _g_.m.mcache.local_tinyallocs = 0
+
+ s := mHeap_AllocSpanLocked(h, npage)
+ if s != nil {
+ // Record span info, because gc needs to be
+ // able to map interior pointer to containing span.
+ atomicstore(&s.sweepgen, h.sweepgen)
+ s.state = _MSpanInUse
+ s.freelist = nil
+ s.ref = 0
+ s.sizeclass = uint8(sizeclass)
+ if sizeclass == 0 {
+ s.elemsize = s.npages << _PageShift
+ } else {
+ s.elemsize = uintptr(class_to_size[sizeclass])
+ }
+
+ // update stats, sweep lists
+ if large {
+ memstats.heap_objects++
+ memstats.heap_alloc += uint64(npage << _PageShift)
+ // Swept spans are at the end of lists.
+ if s.npages < uintptr(len(h.free)) {
+ mSpanList_InsertBack(&h.busy[s.npages], s)
+ } else {
+ mSpanList_InsertBack(&h.busylarge, s)
+ }
+ }
+ }
+ unlock(&h.lock)
+ return s
+}
+
+func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
+ // Don't do any operations that lock the heap on the G stack.
+ // It might trigger stack growth, and the stack growth code needs
+ // to be able to allocate heap.
+ var s *mspan
+ systemstack(func() {
+ s = mHeap_Alloc_m(h, npage, sizeclass, large)
+ })
+
+ if s != nil {
+ if needzero && s.needzero != 0 {
+ memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ }
+ s.needzero = 0
+ }
+ return s
+}
+
+func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
+ _g_ := getg()
+ if _g_ != _g_.m.g0 {
+ gothrow("mheap_allocstack not on g0 stack")
+ }
+ lock(&h.lock)
+ s := mHeap_AllocSpanLocked(h, npage)
+ if s != nil {
+ s.state = _MSpanStack
+ s.freelist = nil
+ s.ref = 0
+ memstats.stacks_inuse += uint64(s.npages << _PageShift)
+ }
+ unlock(&h.lock)
+ return s
+}
+
+// Allocates a span of the given size. h must be locked.
+// The returned span has been removed from the
+// free list, but its state is still MSpanFree.
+func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
+ var s *mspan
+
+ // Try in fixed-size lists up to max.
+ for i := int(npage); i < len(h.free); i++ {
+ if !mSpanList_IsEmpty(&h.free[i]) {
+ s = h.free[i].next
+ goto HaveSpan
+ }
+ }
+
+ // Best fit in list of large spans.
+ s = mHeap_AllocLarge(h, npage)
+ if s == nil {
+ if !mHeap_Grow(h, npage) {
+ return nil
+ }
+ s = mHeap_AllocLarge(h, npage)
+ if s == nil {
+ return nil
+ }
+ }
+
+HaveSpan:
+ // Mark span in use.
+ if s.state != _MSpanFree {
+ gothrow("MHeap_AllocLocked - MSpan not free")
+ }
+ if s.npages < npage {
+ gothrow("MHeap_AllocLocked - bad npages")
+ }
+ mSpanList_Remove(s)
+ if s.next != nil || s.prev != nil {
+ gothrow("still in list")
+ }
+ if s.npreleased > 0 {
+ sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
+ memstats.heap_released -= uint64(s.npreleased << _PageShift)
+ s.npreleased = 0
+ }
+
+ if s.npages > npage {
+ // Trim extra and put it back in the heap.
+ t := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
+ mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
+ s.npages = npage
+ p := uintptr(t.start)
+ p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+ if p > 0 {
+ h_spans[p-1] = s
+ }
+ h_spans[p] = t
+ h_spans[p+t.npages-1] = t
+ t.needzero = s.needzero
+ s.state = _MSpanStack // prevent coalescing with s
+ t.state = _MSpanStack
+ mHeap_FreeSpanLocked(h, t, false, false)
+ t.unusedsince = s.unusedsince // preserve age (TODO: wrong: t is possibly merged and/or deallocated at this point)
+ s.state = _MSpanFree
+ }
+ s.unusedsince = 0
+
+ p := uintptr(s.start)
+ p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+ for n := uintptr(0); n < npage; n++ {
+ h_spans[p+n] = s
+ }
+
+ memstats.heap_inuse += uint64(npage << _PageShift)
+ memstats.heap_idle -= uint64(npage << _PageShift)
+
+ //println("spanalloc", hex(s.start<<_PageShift))
+ if s.next != nil || s.prev != nil {
+ gothrow("still in list")
+ }
+ return s
+}
+
+// Allocate a span of exactly npage pages from the list of large spans.
+func mHeap_AllocLarge(h *mheap, npage uintptr) *mspan {
+ return bestFit(&h.freelarge, npage, nil)
+}
+
+// Search list for smallest span with >= npage pages.
+// If there are multiple smallest spans, take the one
+// with the earliest starting address.
+func bestFit(list *mspan, npage uintptr, best *mspan) *mspan {
+ for s := list.next; s != list; s = s.next {
+ if s.npages < npage {
+ continue
+ }
+ if best == nil || s.npages < best.npages || (s.npages == best.npages && s.start < best.start) {
+ best = s
+ }
+ }
+ return best
+}
+
+// Try to add at least npage pages of memory to the heap,
+// returning whether it worked.
+func mHeap_Grow(h *mheap, npage uintptr) bool {
+ // Ask for a big chunk, to reduce the number of mappings
+ // the operating system needs to track; also amortizes
+ // the overhead of an operating system mapping.
+ // Allocate a multiple of 64kB.
+ npage = round(npage, (64<<10)/_PageSize)
+ ask := npage << _PageShift
+ if ask < _HeapAllocChunk {
+ ask = _HeapAllocChunk
+ }
+
+ v := mHeap_SysAlloc(h, ask)
+ if v == nil {
+ if ask > npage<<_PageShift {
+ ask = npage << _PageShift
+ v = mHeap_SysAlloc(h, ask)
+ }
+ if v == nil {
+ print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
+ return false
+ }
+ }
+
+ // Create a fake "in use" span and free it, so that the
+ // right coalescing happens.
+ s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
+ mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
+ p := uintptr(s.start)
+ p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
+ h_spans[p] = s
+ h_spans[p+s.npages-1] = s
+ atomicstore(&s.sweepgen, h.sweepgen)
+ s.state = _MSpanInUse
+ mHeap_FreeSpanLocked(h, s, false, true)
+ return true
+}
+
+// Look up the span at the given address.
+// Address is guaranteed to be in map
+// and is guaranteed to be start or end of span.
+func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
+ p := uintptr(v)
+ p -= uintptr(unsafe.Pointer(h.arena_start))
+ return h_spans[p>>_PageShift]
+}
+
+// Look up the span at the given address.
+// Address is *not* guaranteed to be in map
+// and may be anywhere in the span.
+// Map entries for the middle of a span are only
+// valid for allocated spans. Free spans may have
+// other garbage in their middles, so we have to
+// check for that.
+func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
+ if uintptr(v) < uintptr(unsafe.Pointer(h.arena_start)) || uintptr(v) >= uintptr(unsafe.Pointer(h.arena_used)) {
+ return nil
+ }
+ p := uintptr(v) >> _PageShift
+ q := p
+ q -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
+ s := h_spans[q]
+ if s == nil || p < uintptr(s.start) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
+ return nil
+ }
+ return s
+}
+
+// Free the span back into the heap.
+func mHeap_Free(h *mheap, s *mspan, acct int32) {
+ systemstack(func() {
+ mp := getg().m
+ lock(&h.lock)
+ memstats.heap_alloc += uint64(mp.mcache.local_cachealloc)
+ mp.mcache.local_cachealloc = 0
+ memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
+ mp.mcache.local_tinyallocs = 0
+ if acct != 0 {
+ memstats.heap_alloc -= uint64(s.npages << _PageShift)
+ memstats.heap_objects--
+ }
+ mHeap_FreeSpanLocked(h, s, true, true)
+ unlock(&h.lock)
+ })
+}
+
+func mHeap_FreeStack(h *mheap, s *mspan) {
+ _g_ := getg()
+ if _g_ != _g_.m.g0 {
+ gothrow("mheap_freestack not on g0 stack")
+ }
+ s.needzero = 1
+ lock(&h.lock)
+ memstats.stacks_inuse -= uint64(s.npages << _PageShift)
+ mHeap_FreeSpanLocked(h, s, true, true)
+ unlock(&h.lock)
+}
+
+func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool) {
+ switch s.state {
+ case _MSpanStack:
+ if s.ref != 0 {
+ gothrow("MHeap_FreeSpanLocked - invalid stack free")
+ }
+ case _MSpanInUse:
+ if s.ref != 0 || s.sweepgen != h.sweepgen {
+ print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
+ gothrow("MHeap_FreeSpanLocked - invalid free")
+ }
+ default:
+ gothrow("MHeap_FreeSpanLocked - invalid span state")
+ }
+
+ if acctinuse {
+ memstats.heap_inuse -= uint64(s.npages << _PageShift)
+ }
+ if acctidle {
+ memstats.heap_idle += uint64(s.npages << _PageShift)
+ }
+ s.state = _MSpanFree
+ mSpanList_Remove(s)
+
+ // Stamp newly unused spans. The scavenger will use that
+ // info to potentially give back some pages to the OS.
+ s.unusedsince = nanotime()
+ s.npreleased = 0
+
+ // Coalesce with earlier, later spans.
+ p := uintptr(s.start)
+ p -= uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift
+ if p > 0 {
+ t := h_spans[p-1]
+ if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
+ s.start = t.start
+ s.npages += t.npages
+ s.npreleased = t.npreleased // absorb released pages
+ s.needzero |= t.needzero
+ p -= t.npages
+ h_spans[p] = s
+ mSpanList_Remove(t)
+ t.state = _MSpanDead
+ fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
+ }
+ }
+ if (p+s.npages)*ptrSize < h.spans_mapped {
+ t := h_spans[p+s.npages]
+ if t != nil && t.state != _MSpanInUse && t.state != _MSpanStack {
+ s.npages += t.npages
+ s.npreleased += t.npreleased
+ s.needzero |= t.needzero
+ h_spans[p+s.npages-1] = s
+ mSpanList_Remove(t)
+ t.state = _MSpanDead
+ fixAlloc_Free(&h.spanalloc, (unsafe.Pointer)(t))
+ }
+ }
+
+ // Insert s into appropriate list.
+ if s.npages < uintptr(len(h.free)) {
+ mSpanList_Insert(&h.free[s.npages], s)
+ } else {
+ mSpanList_Insert(&h.freelarge, s)
+ }
+}
+
+func scavengelist(list *mspan, now, limit uint64) uintptr {
+ if mSpanList_IsEmpty(list) {
+ return 0
+ }
+
+ var sumreleased uintptr
+ for s := list.next; s != list; s = s.next {
+ if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
+ released := (s.npages - s.npreleased) << _PageShift
+ memstats.heap_released += uint64(released)
+ sumreleased += released
+ s.npreleased = s.npages
+ sysUnused((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
+ }
+ }
+ return sumreleased
+}
+
+func mHeap_Scavenge(k int32, now, limit uint64) {
+ h := &mheap_
+ lock(&h.lock)
+ var sumreleased uintptr
+ for i := 0; i < len(h.free); i++ {
+ sumreleased += scavengelist(&h.free[i], now, limit)
+ }
+ sumreleased += scavengelist(&h.freelarge, now, limit)
+ unlock(&h.lock)
+
+ if debug.gctrace > 0 {
+ if sumreleased > 0 {
+ print("scvg", k, ": ", sumreleased>>20, " MB released\n")
+ }
+ // TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
+ // But we can't call ReadMemStats on g0 holding locks.
+ print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
+ }
+}
+
+func scavenge_m() {
+ mHeap_Scavenge(-1, ^uint64(0), 0)
+}
+
+// Initialize a new span with the given start and npages.
+func mSpan_Init(span *mspan, start pageID, npages uintptr) {
+ span.next = nil
+ span.prev = nil
+ span.start = start
+ span.npages = npages
+ span.freelist = nil
+ span.ref = 0
+ span.sizeclass = 0
+ span.incache = false
+ span.elemsize = 0
+ span.state = _MSpanDead
+ span.unusedsince = 0
+ span.npreleased = 0
+ span.speciallock.key = 0
+ span.specials = nil
+ span.needzero = 0
+}
+
+// Initialize an empty doubly-linked list.
+func mSpanList_Init(list *mspan) {
+ list.state = _MSpanListHead
+ list.next = list
+ list.prev = list
+}
+
+func mSpanList_Remove(span *mspan) {
+ if span.prev == nil && span.next == nil {
+ return
+ }
+ span.prev.next = span.next
+ span.next.prev = span.prev
+ span.prev = nil
+ span.next = nil
+}
+
+func mSpanList_IsEmpty(list *mspan) bool {
+ return list.next == list
+}
+
+func mSpanList_Insert(list *mspan, span *mspan) {
+ if span.next != nil || span.prev != nil {
+ println("failed MSpanList_Insert", span, span.next, span.prev)
+ gothrow("MSpanList_Insert")
+ }
+ span.next = list.next
+ span.prev = list
+ span.next.prev = span
+ span.prev.next = span
+}
+
+func mSpanList_InsertBack(list *mspan, span *mspan) {
+ if span.next != nil || span.prev != nil {
+ println("failed MSpanList_InsertBack", span, span.next, span.prev)
+ gothrow("MSpanList_InsertBack")
+ }
+ span.next = list
+ span.prev = list.prev
+ span.next.prev = span
+ span.prev.next = span
+}
+
+// Adds the special record s to the list of special records for
+// the object p. All fields of s should be filled in except for
+// offset & next, which this routine will fill in.
+// Returns true if the special was successfully added, false otherwise.
+// (The add will fail only if a record with the same p and s->kind
+// already exists.)
+func addspecial(p unsafe.Pointer, s *special) bool {
+ span := mHeap_LookupMaybe(&mheap_, p)
+ if span == nil {
+ gothrow("addspecial on invalid pointer")
+ }
+
+ // Ensure that the span is swept.
+ // GC accesses specials list w/o locks. And it's just much safer.
+ mp := acquirem()
+ mSpan_EnsureSwept(span)
+
+ offset := uintptr(p) - uintptr(span.start<<_PageShift)
+ kind := s.kind
+
+ lock(&span.speciallock)
+
+ // Find splice point, check for existing record.
+ t := &span.specials
+ for {
+ x := *t
+ if x == nil {
+ break
+ }
+ if offset == uintptr(x.offset) && kind == x.kind {
+ unlock(&span.speciallock)
+ releasem(mp)
+ return false // already exists
+ }
+ if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
+ break
+ }
+ t = &x.next
+ }
+
+ // Splice in record, fill in offset.
+ s.offset = uint16(offset)
+ s.next = *t
+ *t = s
+ unlock(&span.speciallock)
+ releasem(mp)
+
+ return true
+}
+
+// Removes the Special record of the given kind for the object p.
+// Returns the record if the record existed, nil otherwise.
+// The caller must FixAlloc_Free the result.
+func removespecial(p unsafe.Pointer, kind uint8) *special {
+ span := mHeap_LookupMaybe(&mheap_, p)
+ if span == nil {
+ gothrow("removespecial on invalid pointer")
+ }
+
+ // Ensure that the span is swept.
+ // GC accesses specials list w/o locks. And it's just much safer.
+ mp := acquirem()
+ mSpan_EnsureSwept(span)
+
+ offset := uintptr(p) - uintptr(span.start<<_PageShift)
+
+ lock(&span.speciallock)
+ t := &span.specials
+ for {
+ s := *t
+ if s == nil {
+ break
+ }
+ // This function is used for finalizers only, so we don't check for
+ // "interior" specials (p must be exactly equal to s->offset).
+ if offset == uintptr(s.offset) && kind == s.kind {
+ *t = s.next
+ unlock(&span.speciallock)
+ releasem(mp)
+ return s
+ }
+ t = &s.next
+ }
+ unlock(&span.speciallock)
+ releasem(mp)
+ return nil
+}
+
+// Adds a finalizer to the object p. Returns true if it succeeded.
+func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
+ lock(&mheap_.speciallock)
+ s := (*specialfinalizer)(fixAlloc_Alloc(&mheap_.specialfinalizeralloc))
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialFinalizer
+ s.fn = f
+ s.nret = nret
+ s.fint = fint
+ s.ot = ot
+ if addspecial(p, &s.special) {
+ return true
+ }
+
+ // There was an old finalizer
+ lock(&mheap_.speciallock)
+ fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
+ unlock(&mheap_.speciallock)
+ return false
+}
+
+// Removes the finalizer (if any) from the object p.
+func removefinalizer(p unsafe.Pointer) {
+ s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
+ if s == nil {
+ return // there wasn't a finalizer to remove
+ }
+ lock(&mheap_.speciallock)
+ fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(s))
+ unlock(&mheap_.speciallock)
+}
+
+// Set the heap profile bucket associated with addr to b.
+func setprofilebucket(p unsafe.Pointer, b *bucket) {
+ lock(&mheap_.speciallock)
+ s := (*specialprofile)(fixAlloc_Alloc(&mheap_.specialprofilealloc))
+ unlock(&mheap_.speciallock)
+ s.special.kind = _KindSpecialProfile
+ s.b = b
+ if !addspecial(p, &s.special) {
+ gothrow("setprofilebucket: profile already set")
+ }
+}
+
+// Do whatever cleanup needs to be done to deallocate s. It has
+// already been unlinked from the MSpan specials list.
+// Returns true if we should keep working on deallocating p.
+func freespecial(s *special, p unsafe.Pointer, size uintptr, freed bool) bool {
+ switch s.kind {
+ case _KindSpecialFinalizer:
+ sf := (*specialfinalizer)(unsafe.Pointer(s))
+ queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
+ lock(&mheap_.speciallock)
+ fixAlloc_Free(&mheap_.specialfinalizeralloc, (unsafe.Pointer)(sf))
+ unlock(&mheap_.speciallock)
+ return false // don't free p until finalizer is done
+ case _KindSpecialProfile:
+ sp := (*specialprofile)(unsafe.Pointer(s))
+ mProf_Free(sp.b, size, freed)
+ lock(&mheap_.speciallock)
+ fixAlloc_Free(&mheap_.specialprofilealloc, (unsafe.Pointer)(sp))
+ unlock(&mheap_.speciallock)
+ return true
+ default:
+ gothrow("bad special kind")
+ panic("not reached")
+ }
+}
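
Aside: the specials attached to an MSpan form a singly linked list kept sorted by (offset, kind), which is what the splice-point loop in addspecial above maintains. A minimal, self-contained sketch of that insertion discipline, using hypothetical stand-in types rather than the runtime's own structures:

package main

import "fmt"

type special struct {
	offset uint16
	kind   byte
	next   *special
}

// insertSpecial mirrors addspecial's splice-point search: walk the sorted
// list, refuse an exact (offset, kind) duplicate, and splice the new record
// in front of the first larger entry.
func insertSpecial(head **special, s *special) bool {
	t := head
	for x := *t; x != nil; x = *t {
		if x.offset == s.offset && x.kind == s.kind {
			return false // already exists
		}
		if s.offset < x.offset || (s.offset == x.offset && s.kind < x.kind) {
			break
		}
		t = &x.next
	}
	s.next = *t
	*t = s
	return true
}

func main() {
	var head *special
	fmt.Println(insertSpecial(&head, &special{offset: 16, kind: 1})) // true
	fmt.Println(insertSpecial(&head, &special{offset: 16, kind: 1})) // false: duplicate
	fmt.Println(insertSpecial(&head, &special{offset: 8, kind: 2}))  // true, spliced at the front
}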
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index d409c6c30..ba989b1b8 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -190,8 +190,6 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
return b
}
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer
-
func eqslice(x, y []uintptr) bool {
if len(x) != len(y) {
return false
@@ -246,16 +244,9 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
// This reduces potential contention and chances of deadlocks.
 // Since the object must be alive during the call to mProf_Malloc,
// it's fine to do this non-atomically.
- setprofilebucket(p, b)
-}
-
-func setprofilebucket_m() // mheap.c
-
-func setprofilebucket(p unsafe.Pointer, b *bucket) {
- g := getg()
- g.m.ptrarg[0] = p
- g.m.ptrarg[1] = unsafe.Pointer(b)
- onM(setprofilebucket_m)
+ systemstack(func() {
+ setprofilebucket(p, b)
+ })
}
// Called when freeing a profiled block.
@@ -519,8 +510,6 @@ func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
return
}
-var allgs []*g // proc.c
-
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
@@ -534,7 +523,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
gp := getg()
semacquire(&worldsema, false)
gp.m.gcing = 1
- onM(stoptheworld)
+ systemstack(stoptheworld)
n = NumGoroutine()
if n <= len(p) {
@@ -542,7 +531,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
r := p
sp := getcallersp(unsafe.Pointer(&p))
pc := getcallerpc(unsafe.Pointer(&p))
- onM(func() {
+ systemstack(func() {
saveg(pc, sp, gp, &r[0])
})
r = r[1:]
@@ -557,7 +546,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
gp.m.gcing = 0
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
}
return n, ok
@@ -581,7 +570,7 @@ func Stack(buf []byte, all bool) int {
semacquire(&worldsema, false)
mp.gcing = 1
releasem(mp)
- onM(stoptheworld)
+ systemstack(stoptheworld)
if mp != acquirem() {
gothrow("Stack: rescheduled")
}
@@ -591,7 +580,7 @@ func Stack(buf []byte, all bool) int {
if len(buf) > 0 {
sp := getcallersp(unsafe.Pointer(&buf))
pc := getcallerpc(unsafe.Pointer(&buf))
- onM(func() {
+ systemstack(func() {
g0 := getg()
g0.writebuf = buf[0:0:len(buf)]
goroutineheader(gp)
@@ -607,7 +596,7 @@ func Stack(buf []byte, all bool) int {
if all {
mp.gcing = 0
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
}
releasem(mp)
return n
@@ -630,7 +619,7 @@ func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
goroutineheader(gp)
pc := getcallerpc(unsafe.Pointer(&p))
sp := getcallersp(unsafe.Pointer(&p))
- onM(func() {
+ systemstack(func() {
traceback(pc, sp, 0, gp)
})
} else {
@@ -650,7 +639,7 @@ func tracefree(p unsafe.Pointer, size uintptr) {
goroutineheader(gp)
pc := getcallerpc(unsafe.Pointer(&p))
sp := getcallersp(unsafe.Pointer(&p))
- onM(func() {
+ systemstack(func() {
traceback(pc, sp, 0, gp)
})
print("\n")
diff --git a/src/runtime/msize.c b/src/runtime/msize.c
deleted file mode 100644
index 7cb65dad0..000000000
--- a/src/runtime/msize.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Malloc small size classes.
-//
-// See malloc.h for overview.
-//
-// The size classes are chosen so that rounding an allocation
-// request up to the next size class wastes at most 12.5% (1.125x).
-//
-// Each size class has its own page count that gets allocated
-// and chopped up when new objects of the size class are needed.
-// That page count is chosen so that chopping up the run of
-// pages into objects of the given size wastes at most 12.5% (1.125x)
-// of the memory. It is not necessary that the cutoff here be
-// the same as above.
-//
-// The two sources of waste multiply, so the worst possible case
-// for the above constraints would be that allocations of some
-// size might have a 26.6% (1.266x) overhead.
-// In practice, only one of the wastes comes into play for a
-// given size (sizes < 512 waste mainly on the round-up,
-// sizes > 512 waste mainly on the page chopping).
-//
-// TODO(rsc): Compute max waste for any given size.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "textflag.h"
-
-#pragma dataflag NOPTR
-int32 runtime·class_to_size[NumSizeClasses];
-#pragma dataflag NOPTR
-int32 runtime·class_to_allocnpages[NumSizeClasses];
-
-// The SizeToClass lookup is implemented using two arrays,
-// one mapping sizes <= 1024 to their class and one mapping
-// sizes >= 1024 and <= MaxSmallSize to their class.
-// All objects are 8-aligned, so the first array is indexed by
-// the size divided by 8 (rounded up). Objects >= 1024 bytes
-// are 128-aligned, so the second array is indexed by the
-// size divided by 128 (rounded up). The arrays are filled in
-// by InitSizes.
-
-#pragma dataflag NOPTR
-int8 runtime·size_to_class8[1024/8 + 1];
-#pragma dataflag NOPTR
-int8 runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1];
-
-void runtime·testdefersizes(void);
-
-int32
-runtime·SizeToClass(int32 size)
-{
- if(size > MaxSmallSize)
- runtime·throw("SizeToClass - invalid size");
- if(size > 1024-8)
- return runtime·size_to_class128[(size-1024+127) >> 7];
- return runtime·size_to_class8[(size+7)>>3];
-}
-
-void
-runtime·InitSizes(void)
-{
- int32 align, sizeclass, size, nextsize, n;
- uint32 i;
- uintptr allocsize, npages;
-
- // Initialize the runtime·class_to_size table (and choose class sizes in the process).
- runtime·class_to_size[0] = 0;
- sizeclass = 1; // 0 means no class
- align = 8;
- for(size = align; size <= MaxSmallSize; size += align) {
- if((size&(size-1)) == 0) { // bump alignment once in a while
- if(size >= 2048)
- align = 256;
- else if(size >= 128)
- align = size / 8;
- else if(size >= 16)
- align = 16; // required for x86 SSE instructions, if we want to use them
- }
- if((align&(align-1)) != 0)
- runtime·throw("InitSizes - bug");
-
- // Make the allocnpages big enough that
- // the leftover is less than 1/8 of the total,
- // so wasted space is at most 12.5%.
- allocsize = PageSize;
- while(allocsize%size > allocsize/8)
- allocsize += PageSize;
- npages = allocsize >> PageShift;
-
- // If the previous sizeclass chose the same
- // allocation size and fit the same number of
- // objects into the page, we might as well
- // use just this size instead of having two
- // different sizes.
- if(sizeclass > 1 &&
- npages == runtime·class_to_allocnpages[sizeclass-1] &&
- allocsize/size == allocsize/runtime·class_to_size[sizeclass-1]) {
- runtime·class_to_size[sizeclass-1] = size;
- continue;
- }
-
- runtime·class_to_allocnpages[sizeclass] = npages;
- runtime·class_to_size[sizeclass] = size;
- sizeclass++;
- }
- if(sizeclass != NumSizeClasses) {
- runtime·printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
- runtime·throw("InitSizes - bad NumSizeClasses");
- }
-
- // Initialize the size_to_class tables.
- nextsize = 0;
- for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
- for(; nextsize < 1024 && nextsize <= runtime·class_to_size[sizeclass]; nextsize+=8)
- runtime·size_to_class8[nextsize/8] = sizeclass;
- if(nextsize >= 1024)
- for(; nextsize <= runtime·class_to_size[sizeclass]; nextsize += 128)
- runtime·size_to_class128[(nextsize-1024)/128] = sizeclass;
- }
-
- // Double-check SizeToClass.
- if(0) {
- for(n=0; n < MaxSmallSize; n++) {
- sizeclass = runtime·SizeToClass(n);
- if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime·class_to_size[sizeclass] < n) {
- runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
- runtime·printf("incorrect SizeToClass");
- goto dump;
- }
- if(sizeclass > 1 && runtime·class_to_size[sizeclass-1] >= n) {
- runtime·printf("size=%d sizeclass=%d runtime·class_to_size=%d\n", n, sizeclass, runtime·class_to_size[sizeclass]);
- runtime·printf("SizeToClass too big");
- goto dump;
- }
- }
- }
-
- runtime·testdefersizes();
-
- // Copy out for statistics table.
- for(i=0; i<nelem(runtime·class_to_size); i++)
- mstats.by_size[i].size = runtime·class_to_size[i];
- return;
-
-dump:
- if(1){
- runtime·printf("NumSizeClasses=%d\n", NumSizeClasses);
- runtime·printf("runtime·class_to_size:");
- for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
- runtime·printf(" %d", runtime·class_to_size[sizeclass]);
- runtime·printf("\n\n");
- runtime·printf("size_to_class8:");
- for(i=0; i<nelem(runtime·size_to_class8); i++)
- runtime·printf(" %d=>%d(%d)\n", i*8, runtime·size_to_class8[i],
- runtime·class_to_size[runtime·size_to_class8[i]]);
- runtime·printf("\n");
- runtime·printf("size_to_class128:");
- for(i=0; i<nelem(runtime·size_to_class128); i++)
- runtime·printf(" %d=>%d(%d)\n", i*128, runtime·size_to_class128[i],
- runtime·class_to_size[runtime·size_to_class128[i]]);
- runtime·printf("\n");
- }
- runtime·throw("InitSizes failed");
-}
-
-// Returns size of the memory block that mallocgc will allocate if you ask for the size.
-uintptr
-runtime·roundupsize(uintptr size)
-{
- if(size < MaxSmallSize) {
- if(size <= 1024-8)
- return runtime·class_to_size[runtime·size_to_class8[(size+7)>>3]];
- else
- return runtime·class_to_size[runtime·size_to_class128[(size-1024+127) >> 7]];
- }
- if(size + PageSize < size)
- return size;
- return ROUND(size, PageSize);
-}
diff --git a/src/runtime/msize.go b/src/runtime/msize.go
new file mode 100644
index 000000000..aa2b43e90
--- /dev/null
+++ b/src/runtime/msize.go
@@ -0,0 +1,174 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc small size classes.
+//
+// See malloc.h for overview.
+//
+// The size classes are chosen so that rounding an allocation
+// request up to the next size class wastes at most 12.5% (1.125x).
+//
+// Each size class has its own page count that gets allocated
+// and chopped up when new objects of the size class are needed.
+// That page count is chosen so that chopping up the run of
+// pages into objects of the given size wastes at most 12.5% (1.125x)
+// of the memory. It is not necessary that the cutoff here be
+// the same as above.
+//
+// The two sources of waste multiply, so the worst possible case
+// for the above constraints would be that allocations of some
+// size might have a 26.6% (1.266x) overhead.
+// In practice, only one of the wastes comes into play for a
+// given size (sizes < 512 waste mainly on the round-up,
+// sizes > 512 waste mainly on the page chopping).
+//
+// TODO(rsc): Compute max waste for any given size.
+
+package runtime
+
+//var class_to_size [_NumSizeClasses]int32
+//var class_to_allocnpages [_NumSizeClasses]int32
+
+// The SizeToClass lookup is implemented using two arrays,
+// one mapping sizes <= 1024 to their class and one mapping
+// sizes >= 1024 and <= MaxSmallSize to their class.
+// All objects are 8-aligned, so the first array is indexed by
+// the size divided by 8 (rounded up). Objects >= 1024 bytes
+// are 128-aligned, so the second array is indexed by the
+// size divided by 128 (rounded up). The arrays are filled in
+// by InitSizes.
+//var size_to_class8 [1024/8 + 1]int8
+//var size_to_class128 [(_MaxSmallSize-1024)/128 + 1]int8
+
+func sizeToClass(size int32) int32 {
+ if size > _MaxSmallSize {
+ gothrow("SizeToClass - invalid size")
+ }
+ if size > 1024-8 {
+ return int32(size_to_class128[(size-1024+127)>>7])
+ }
+ return int32(size_to_class8[(size+7)>>3])
+}
+
+func initSizes() {
+ // Initialize the class_to_size table (and choose class sizes in the process).
+ class_to_size[0] = 0
+ sizeclass := 1 // 0 means no class
+ align := 8
+ for size := align; size <= _MaxSmallSize; size += align {
+ if size&(size-1) == 0 { // bump alignment once in a while
+ if size >= 2048 {
+ align = 256
+ } else if size >= 128 {
+ align = size / 8
+ } else if size >= 16 {
+ align = 16 // required for x86 SSE instructions, if we want to use them
+ }
+ }
+ if align&(align-1) != 0 {
+ gothrow("InitSizes - bug")
+ }
+
+ // Make the allocnpages big enough that
+ // the leftover is less than 1/8 of the total,
+ // so wasted space is at most 12.5%.
+ allocsize := _PageSize
+ for allocsize%size > allocsize/8 {
+ allocsize += _PageSize
+ }
+ npages := allocsize >> _PageShift
+
+ // If the previous sizeclass chose the same
+ // allocation size and fit the same number of
+ // objects into the page, we might as well
+ // use just this size instead of having two
+ // different sizes.
+ if sizeclass > 1 && npages == int(class_to_allocnpages[sizeclass-1]) && allocsize/size == allocsize/int(class_to_size[sizeclass-1]) {
+ class_to_size[sizeclass-1] = int32(size)
+ continue
+ }
+
+ class_to_allocnpages[sizeclass] = int32(npages)
+ class_to_size[sizeclass] = int32(size)
+ sizeclass++
+ }
+ if sizeclass != _NumSizeClasses {
+ print("sizeclass=", sizeclass, " NumSizeClasses=", _NumSizeClasses, "\n")
+ gothrow("InitSizes - bad NumSizeClasses")
+ }
+
+ // Initialize the size_to_class tables.
+ nextsize := 0
+ for sizeclass = 1; sizeclass < _NumSizeClasses; sizeclass++ {
+ for ; nextsize < 1024 && nextsize <= int(class_to_size[sizeclass]); nextsize += 8 {
+ size_to_class8[nextsize/8] = int8(sizeclass)
+ }
+ if nextsize >= 1024 {
+ for ; nextsize <= int(class_to_size[sizeclass]); nextsize += 128 {
+ size_to_class128[(nextsize-1024)/128] = int8(sizeclass)
+ }
+ }
+ }
+
+ // Double-check SizeToClass.
+ if false {
+ for n := int32(0); n < _MaxSmallSize; n++ {
+ sizeclass := sizeToClass(n)
+ if sizeclass < 1 || sizeclass >= _NumSizeClasses || class_to_size[sizeclass] < n {
+ print("size=", n, " sizeclass=", sizeclass, " runtime·class_to_size=", class_to_size[sizeclass], "\n")
+ print("incorrect SizeToClass\n")
+ goto dump
+ }
+ if sizeclass > 1 && class_to_size[sizeclass-1] >= n {
+ print("size=", n, " sizeclass=", sizeclass, " runtime·class_to_size=", class_to_size[sizeclass], "\n")
+ print("SizeToClass too big\n")
+ goto dump
+ }
+ }
+ }
+
+ testdefersizes()
+
+ // Copy out for statistics table.
+ for i := 0; i < len(class_to_size); i++ {
+ memstats.by_size[i].size = uint32(class_to_size[i])
+ }
+ return
+
+dump:
+ if true {
+ print("NumSizeClasses=", _NumSizeClasses, "\n")
+ print("runtime·class_to_size:")
+ for sizeclass = 0; sizeclass < _NumSizeClasses; sizeclass++ {
+ print(" ", class_to_size[sizeclass], "")
+ }
+ print("\n\n")
+ print("size_to_class8:")
+ for i := 0; i < len(size_to_class8); i++ {
+ print(" ", i*8, "=>", size_to_class8[i], "(", class_to_size[size_to_class8[i]], ")\n")
+ }
+ print("\n")
+ print("size_to_class128:")
+ for i := 0; i < len(size_to_class128); i++ {
+ print(" ", i*128, "=>", size_to_class128[i], "(", class_to_size[size_to_class128[i]], ")\n")
+ }
+ print("\n")
+ }
+ gothrow("InitSizes failed")
+}
+
+// Returns size of the memory block that mallocgc will allocate if you ask for the size.
+func roundupsize(size uintptr) uintptr {
+ if size < _MaxSmallSize {
+ if size <= 1024-8 {
+ return uintptr(class_to_size[size_to_class8[(size+7)>>3]])
+ } else {
+ return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]])
+ }
+ }
+ if size+_PageSize < size {
+ return size
+ }
+ return round(size, _PageSize)
+}
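
The 26.6% figure in the header comment is simply the two 12.5% waste bounds compounding. A quick back-of-the-envelope check in plain Go (not runtime code):

package main

import "fmt"

func main() {
	roundUpWaste := 1.125  // rounding a request up to the next size class: at most 1.125x
	choppingWaste := 1.125 // leftover when chopping a span into objects: at most 1.125x
	// The two sources of waste multiply, giving the worst-case overhead.
	fmt.Printf("worst combined overhead: %.1f%%\n", (roundUpWaste*choppingWaste-1)*100)
	// prints: worst combined overhead: 26.6%
}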
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index 3456e0208..7a99f18ad 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -49,14 +49,14 @@ type pollDesc struct {
 lock mutex // protects the following fields
fd uintptr
closing bool
- seq uintptr // protects from stale timers and ready notifications
- rg uintptr // pdReady, pdWait, G waiting for read or nil
- rt timer // read deadline timer (set if rt.f != nil)
- rd int64 // read deadline
- wg uintptr // pdReady, pdWait, G waiting for write or nil
- wt timer // write deadline timer
- wd int64 // write deadline
- user unsafe.Pointer // user settable cookie
+ seq uintptr // protects from stale timers and ready notifications
+ rg uintptr // pdReady, pdWait, G waiting for read or nil
+ rt timer // read deadline timer (set if rt.f != nil)
+ rd int64 // read deadline
+ wg uintptr // pdReady, pdWait, G waiting for write or nil
+ wt timer // write deadline timer
+ wd int64 // write deadline
+ user uint32 // user settable cookie
}
type pollCache struct {
@@ -72,7 +72,7 @@ type pollCache struct {
var pollcache pollCache
func netpollServerInit() {
- onM(netpollinit)
+ netpollinit()
}
func netpollOpen(fd uintptr) (*pollDesc, int) {
@@ -94,9 +94,7 @@ func netpollOpen(fd uintptr) (*pollDesc, int) {
unlock(&pd.lock)
var errno int32
- onM(func() {
- errno = netpollopen(fd, pd)
- })
+ errno = netpollopen(fd, pd)
return pd, int(errno)
}
@@ -110,9 +108,7 @@ func netpollClose(pd *pollDesc) {
if pd.rg != 0 && pd.rg != pdReady {
gothrow("netpollClose: blocked read on closing descriptor")
}
- onM(func() {
- netpollclose(uintptr(pd.fd))
- })
+ netpollclose(uintptr(pd.fd))
pollcache.free(pd)
}
@@ -143,9 +139,7 @@ func netpollWait(pd *pollDesc, mode int) int {
}
// As for now only Solaris uses level-triggered IO.
if GOOS == "solaris" {
- onM(func() {
- netpollarm(pd, mode)
- })
+ netpollarm(pd, mode)
}
for !netpollblock(pd, int32(mode), false) {
err = netpollcheckerr(pd, int32(mode))
@@ -263,26 +257,6 @@ func netpollUnblock(pd *pollDesc) {
}
}
-func netpollfd(pd *pollDesc) uintptr {
- return pd.fd
-}
-
-func netpolluser(pd *pollDesc) *unsafe.Pointer {
- return &pd.user
-}
-
-func netpollclosing(pd *pollDesc) bool {
- return pd.closing
-}
-
-func netpolllock(pd *pollDesc) {
- lock(&pd.lock)
-}
-
-func netpollunlock(pd *pollDesc) {
- unlock(&pd.lock)
-}
-
// make pd ready, newly runnable goroutines (if any) are returned in rg/wg
func netpollready(gpp **g, pd *pollDesc, mode int32) {
var rg, wg *g
@@ -343,8 +317,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
if waitio || netpollcheckerr(pd, mode) == 0 {
- f := netpollblockcommit
- gopark(**(**unsafe.Pointer)(unsafe.Pointer(&f)), unsafe.Pointer(gpp), "IO wait")
+ gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait")
}
// be careful to not lose concurrent READY notification
old := xchguintptr(gpp, 0)
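
The xchguintptr above is what keeps a READY notification from being lost: rg/wg is a single word holding 0, pdReady, or a parked goroutine, and both netpollblock and netpollready resolve the race with atomic swaps. A toy model of that word follows; the sentinel value and the parked-goroutine "token" are assumptions for illustration, and the runtime's intermediate pdWait state is omitted:

package main

import (
	"fmt"
	"sync/atomic"
)

const pdReady uintptr = 1 // assumed sentinel; the runtime's value differs

// block is the waiter's side: if the word already says pdReady, consume it
// and skip parking; otherwise publish a goroutine token and park.
func block(gpp *uintptr, gtoken uintptr) (parked bool) {
	for {
		old := atomic.LoadUintptr(gpp)
		if old == pdReady {
			if atomic.CompareAndSwapUintptr(gpp, pdReady, 0) {
				return false // I/O was already ready, no need to park
			}
			continue
		}
		if atomic.CompareAndSwapUintptr(gpp, old, gtoken) {
			return true // would park here; the poller will find the token
		}
	}
}

// ready is the poller's side: swap in pdReady and return any parked token.
func ready(gpp *uintptr) uintptr {
	old := atomic.SwapUintptr(gpp, pdReady)
	if old != 0 && old != pdReady {
		return old // a parked goroutine to make runnable
	}
	return 0
}

func main() {
	var rg uintptr
	fmt.Println(block(&rg, 100)) // true: parked, rg holds the token
	fmt.Println(ready(&rg))      // 100: the poller recovers the waiter
	fmt.Println(block(&rg, 100)) // false: rg was pdReady, consumed without parking
}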
diff --git a/src/runtime/netpoll_solaris.c b/src/runtime/netpoll_solaris.go
index d422719cf..40e8a1a65 100644
--- a/src/runtime/netpoll_solaris.c
+++ b/src/runtime/netpoll_solaris.go
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
+package runtime
+
+import "unsafe"
// Solaris runtime-integrated network poller.
-//
+//
// Solaris uses event ports for scalable network I/O. Event
// ports are level-triggered, unlike epoll and kqueue which
// can be configured in both level-triggered and edge-triggered
@@ -18,7 +17,7 @@
// events for that descriptor. When doing this we must keep track of
// what kind of events the goroutines are currently interested in,
// for example a fd may be open both for reading and writing.
-//
+//
// A description of the high level operation of this code
// follows. Networking code will get a file descriptor by some means
// and will register it with the netpolling mechanism by a code path
@@ -29,7 +28,7 @@
// readiness notification at some point in the future. If I/O becomes
// ready when nobody is listening, when we finally care about it,
// nobody will tell us anymore.
-//
+//
// Beside calling runtime·netpollopen, the networking code paths
// will call runtime·netpollarm each time goroutines are interested
// in doing network I/O. Because now we know what kind of I/O we
@@ -39,7 +38,7 @@
// when we now call port_associate, we will unblock the main poller
// loop (in runtime·netpoll) right away if the socket is actually
// ready for I/O.
-//
+//
// The main poller loop runs in its own thread waiting for events
// using port_getn. When an event happens, it will tell the scheduler
// about it using runtime·netpollready. Besides doing this, it must
@@ -47,7 +46,7 @@
// notification with the file descriptor. Failing to do this would
// mean each notification will prevent concurrent code using the
// same file descriptor in parallel.
-//
+//
// The logic dealing with re-associations is encapsulated in
// runtime·netpollupdate. This function takes care to associate the
// descriptor only with the subset of events that were previously
@@ -56,7 +55,7 @@
// are level triggered so it would cause a busy loop. Instead, that
// association is effected only by the runtime·netpollarm code path,
// when Go code actually asks for I/O.
-//
+//
// The open and arming mechanisms are serialized using the lock
// inside PollDesc. This is required because the netpoll loop runs
 // asynchronously with respect to other Go code and by the time we get
@@ -68,179 +67,157 @@
// again we know for sure we are always talking about the same file
// descriptor and can safely access the data we want (the event set).
-#pragma dynimport libc·fcntl fcntl "libc.so"
-#pragma dynimport libc·port_create port_create "libc.so"
-#pragma dynimport libc·port_associate port_associate "libc.so"
-#pragma dynimport libc·port_dissociate port_dissociate "libc.so"
-#pragma dynimport libc·port_getn port_getn "libc.so"
-extern uintptr libc·fcntl;
-extern uintptr libc·port_create;
-extern uintptr libc·port_associate;
-extern uintptr libc·port_dissociate;
-extern uintptr libc·port_getn;
-
-#define errno (*g->m->perrno)
+//go:cgo_import_dynamic libc_port_create port_create "libc.so"
+//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
+//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
+//go:cgo_import_dynamic libc_port_getn port_getn "libc.so"
+
+//go:linkname libc_port_create libc_port_create
+//go:linkname libc_port_associate libc_port_associate
+//go:linkname libc_port_dissociate libc_port_dissociate
+//go:linkname libc_port_getn libc_port_getn
+
+var (
+ libc_port_create,
+ libc_port_associate,
+ libc_port_dissociate,
+ libc_port_getn libcFunc
+)
+
+func errno() int32 {
+ return *getg().m.perrno
+}
-int32
-runtime·fcntl(int32 fd, int32 cmd, uintptr arg)
-{
- return runtime·sysvicall3(libc·fcntl, (uintptr)fd, (uintptr)cmd, (uintptr)arg);
+func fcntl(fd, cmd int32, arg uintptr) int32 {
+ return int32(sysvicall3(libc_fcntl, uintptr(fd), uintptr(cmd), arg))
}
-int32
-runtime·port_create(void)
-{
- return runtime·sysvicall0(libc·port_create);
+func port_create() int32 {
+ return int32(sysvicall0(libc_port_create))
}
-int32
-runtime·port_associate(int32 port, int32 source, uintptr object, int32 events, uintptr user)
-{
- return runtime·sysvicall5(libc·port_associate, (uintptr)port, (uintptr)source, object, (uintptr)events, user);
+func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32 {
+ return int32(sysvicall5(libc_port_associate, uintptr(port), uintptr(source), object, uintptr(events), user))
}
-int32
-runtime·port_dissociate(int32 port, int32 source, uintptr object)
-{
- return runtime·sysvicall3(libc·port_dissociate, (uintptr)port, (uintptr)source, object);
+func port_dissociate(port, source int32, object uintptr) int32 {
+ return int32(sysvicall3(libc_port_dissociate, uintptr(port), uintptr(source), object))
}
-int32
-runtime·port_getn(int32 port, PortEvent *evs, uint32 max, uint32 *nget, Timespec *timeout)
-{
- return runtime·sysvicall5(libc·port_getn, (uintptr)port, (uintptr)evs, (uintptr)max, (uintptr)nget, (uintptr)timeout);
+func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32 {
+ return int32(sysvicall5(libc_port_getn, uintptr(port), uintptr(unsafe.Pointer(evs)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout))))
}
-static int32 portfd = -1;
+var portfd int32 = -1
-void
-runtime·netpollinit(void)
-{
- if((portfd = runtime·port_create()) >= 0) {
- runtime·fcntl(portfd, F_SETFD, FD_CLOEXEC);
- return;
+func netpollinit() {
+ portfd = port_create()
+ if portfd >= 0 {
+ fcntl(portfd, _F_SETFD, _FD_CLOEXEC)
+ return
}
- runtime·printf("netpollinit: failed to create port (%d)\n", errno);
- runtime·throw("netpollinit: failed to create port");
+ print("netpollinit: failed to create port (", errno(), ")\n")
+ gothrow("netpollinit: failed to create port")
}
-int32
-runtime·netpollopen(uintptr fd, PollDesc *pd)
-{
- int32 r;
-
- runtime·netpolllock(pd);
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ lock(&pd.lock)
// We don't register for any specific type of events yet, that's
// netpollarm's job. We merely ensure we call port_associate before
 // asynchronous connect/accept completes, so when we actually want
// to do any I/O, the call to port_associate (from netpollarm,
// with the interested event set) will unblock port_getn right away
// because of the I/O readiness notification.
- *runtime·netpolluser(pd) = 0;
- r = runtime·port_associate(portfd, PORT_SOURCE_FD, fd, 0, (uintptr)pd);
- runtime·netpollunlock(pd);
- return r;
+ pd.user = 0
+ r := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))
+ unlock(&pd.lock)
+ return r
}
-int32
-runtime·netpollclose(uintptr fd)
-{
- return runtime·port_dissociate(portfd, PORT_SOURCE_FD, fd);
+func netpollclose(fd uintptr) int32 {
+ return port_dissociate(portfd, _PORT_SOURCE_FD, fd)
}
// Updates the association with a new set of interested events. After
// this call, port_getn will return one and only one event for that
// particular descriptor, so this function needs to be called again.
-void
-runtime·netpollupdate(PollDesc* pd, uint32 set, uint32 clear)
-{
- uint32 *ep, old, events;
- uintptr fd = runtime·netpollfd(pd);
- ep = (uint32*)runtime·netpolluser(pd);
-
- if(runtime·netpollclosing(pd))
- return;
+func netpollupdate(pd *pollDesc, set, clear uint32) {
+ if pd.closing {
+ return
+ }
- old = *ep;
- events = (old & ~clear) | set;
- if(old == events)
- return;
+ old := pd.user
+ events := (old & ^clear) | set
+ if old == events {
+ return
+ }
- if(events && runtime·port_associate(portfd, PORT_SOURCE_FD, fd, events, (uintptr)pd) != 0) {
- runtime·printf("netpollupdate: failed to associate (%d)\n", errno);
- runtime·throw("netpollupdate: failed to associate");
- }
- *ep = events;
+ if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
+ print("netpollupdate: failed to associate (", errno(), ")\n")
+ gothrow("netpollupdate: failed to associate")
+ }
+ pd.user = events
}
// subscribe the fd to the port such that port_getn will return one event.
-void
-runtime·netpollarm(PollDesc* pd, int32 mode)
-{
- runtime·netpolllock(pd);
- switch(mode) {
+func netpollarm(pd *pollDesc, mode int) {
+ lock(&pd.lock)
+ switch mode {
case 'r':
- runtime·netpollupdate(pd, POLLIN, 0);
- break;
+ netpollupdate(pd, _POLLIN, 0)
case 'w':
- runtime·netpollupdate(pd, POLLOUT, 0);
- break;
+ netpollupdate(pd, _POLLOUT, 0)
default:
- runtime·throw("netpollarm: bad mode");
+ gothrow("netpollarm: bad mode")
}
- runtime·netpollunlock(pd);
+ unlock(&pd.lock)
}
+// netpolllasterr holds the last error code returned by port_getn to prevent log spamming
+var netpolllasterr int32
+
// polls for ready network connections
// returns list of goroutines that become runnable
-G*
-runtime·netpoll(bool block)
-{
- static int32 lasterr;
- PortEvent events[128], *ev;
- PollDesc *pd;
- int32 i, mode, clear;
- uint32 n;
- Timespec *wait = nil, zero;
- G *gp;
-
- if(portfd == -1)
- return (nil);
+func netpoll(block bool) (gp *g) {
+ if portfd == -1 {
+ return
+ }
- if(!block) {
- zero.tv_sec = 0;
- zero.tv_nsec = 0;
- wait = &zero;
+ var wait *timespec
+ var zero timespec
+ if !block {
+ wait = &zero
}
+ var events [128]portevent
retry:
- n = 1;
- if(runtime·port_getn(portfd, events, nelem(events), &n, wait) < 0) {
- if(errno != EINTR && errno != lasterr) {
- lasterr = errno;
- runtime·printf("runtime: port_getn on fd %d failed with %d\n", portfd, errno);
+ var n uint32 = 1
+ if port_getn(portfd, &events[0], uint32(len(events)), &n, wait) < 0 {
+ if e := errno(); e != _EINTR && e != netpolllasterr {
+ netpolllasterr = e
+ print("runtime: port_getn on fd ", portfd, " failed with ", e, "\n")
}
- goto retry;
+ goto retry
}
- gp = nil;
- for(i = 0; i < n; i++) {
- ev = &events[i];
+ gp = nil
+ for i := 0; i < int(n); i++ {
+ ev := &events[i]
- if(ev->portev_events == 0)
- continue;
- pd = (PollDesc *)ev->portev_user;
+ if ev.portev_events == 0 {
+ continue
+ }
+ pd := (*pollDesc)(unsafe.Pointer(ev.portev_user))
- mode = 0;
- clear = 0;
- if(ev->portev_events & (POLLIN|POLLHUP|POLLERR)) {
- mode += 'r';
- clear |= POLLIN;
+ var mode, clear int32
+ if (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {
+ mode += 'r'
+ clear |= _POLLIN
}
- if(ev->portev_events & (POLLOUT|POLLHUP|POLLERR)) {
- mode += 'w';
- clear |= POLLOUT;
+ if (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {
+ mode += 'w'
+ clear |= _POLLOUT
}
// To effect edge-triggered events, we need to be sure to
// update our association with whatever events were not
@@ -248,17 +225,19 @@ retry:
// for POLLIN|POLLOUT, and we get POLLIN, besides waking
// the goroutine interested in POLLIN we have to not forget
// about the one interested in POLLOUT.
- if(clear != 0) {
- runtime·netpolllock(pd);
- runtime·netpollupdate(pd, 0, clear);
- runtime·netpollunlock(pd);
+ if clear != 0 {
+ lock(&pd.lock)
+ netpollupdate(pd, 0, uint32(clear))
+ unlock(&pd.lock)
}
- if(mode)
- runtime·netpollready(&gp, pd, mode);
+ if mode != 0 {
+ netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
+ }
}
- if(block && gp == nil)
- goto retry;
- return gp;
+ if block && gp == nil {
+ goto retry
+ }
+ return gp
}
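
To make the re-association story concrete: netpollupdate's bookkeeping is just events = (old &^ clear) | set, followed by a port_associate call when the set actually changed and is non-empty. A standalone sketch of that arithmetic; the constant values here are made up, the real _POLLIN/_POLLOUT come from the system headers:

package main

import "fmt"

const (
	pollin  uint32 = 0x1 // assumed value for illustration
	pollout uint32 = 0x2 // assumed value for illustration
)

// update mimics events := (old &^ clear) | set and reports whether a
// port_associate call would be needed (the set changed and is non-empty).
func update(old, set, clear uint32) (events uint32, reassociate bool) {
	events = (old &^ clear) | set
	return events, events != old && events != 0
}

func main() {
	// netpollarm(pd, 'r'): add read interest.
	ev, re := update(0, pollin, 0)
	fmt.Printf("after arm r: %#x reassociate=%v\n", ev, re) // 0x1 true

	// netpollarm(pd, 'w') on the same descriptor: now both.
	ev, re = update(ev, pollout, 0)
	fmt.Printf("after arm w: %#x reassociate=%v\n", ev, re) // 0x3 true

	// port_getn delivered POLLIN only: netpoll clears the read interest,
	// leaving the write interest to be re-associated.
	ev, re = update(ev, 0, pollin)
	fmt.Printf("after POLLIN: %#x reassociate=%v\n", ev, re) // 0x2 true
}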
diff --git a/src/runtime/netpoll_windows.c b/src/runtime/netpoll_windows.c
deleted file mode 100644
index 64da41ad9..000000000
--- a/src/runtime/netpoll_windows.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-
-#define DWORD_MAX 0xffffffff
-
-#pragma dynimport runtime·CreateIoCompletionPort CreateIoCompletionPort "kernel32.dll"
-#pragma dynimport runtime·GetQueuedCompletionStatus GetQueuedCompletionStatus "kernel32.dll"
-#pragma dynimport runtime·WSAGetOverlappedResult WSAGetOverlappedResult "ws2_32.dll"
-
-extern void *runtime·CreateIoCompletionPort;
-extern void *runtime·GetQueuedCompletionStatus;
-extern void *runtime·WSAGetOverlappedResult;
-
-#define INVALID_HANDLE_VALUE ((uintptr)-1)
-
-// net_op must be the same as beginning of net.operation. Keep these in sync.
-typedef struct net_op net_op;
-struct net_op
-{
- // used by windows
- Overlapped o;
- // used by netpoll
- PollDesc* pd;
- int32 mode;
- int32 errno;
- uint32 qty;
-};
-
-typedef struct OverlappedEntry OverlappedEntry;
-struct OverlappedEntry
-{
- uintptr key;
- net_op* op; // In reality it's Overlapped*, but we cast it to net_op* anyway.
- uintptr internal;
- uint32 qty;
-};
-
-static void handlecompletion(G **gpp, net_op *o, int32 errno, uint32 qty);
-
-static uintptr iocphandle = INVALID_HANDLE_VALUE; // completion port io handle
-
-void
-runtime·netpollinit(void)
-{
- iocphandle = (uintptr)runtime·stdcall4(runtime·CreateIoCompletionPort, INVALID_HANDLE_VALUE, 0, 0, DWORD_MAX);
- if(iocphandle == 0) {
- runtime·printf("netpoll: failed to create iocp handle (errno=%d)\n", runtime·getlasterror());
- runtime·throw("netpoll: failed to create iocp handle");
- }
- return;
-}
-
-int32
-runtime·netpollopen(uintptr fd, PollDesc *pd)
-{
- USED(pd);
- if(runtime·stdcall4(runtime·CreateIoCompletionPort, fd, iocphandle, 0, 0) == 0)
- return -runtime·getlasterror();
- return 0;
-}
-
-int32
-runtime·netpollclose(uintptr fd)
-{
- // nothing to do
- USED(fd);
- return 0;
-}
-
-void
-runtime·netpollarm(PollDesc* pd, int32 mode)
-{
- USED(pd, mode);
- runtime·throw("unused");
-}
-
-// Polls for completed network IO.
-// Returns list of goroutines that become runnable.
-G*
-runtime·netpoll(bool block)
-{
- OverlappedEntry entries[64];
- uint32 wait, qty, key, flags, n, i;
- int32 errno;
- net_op *op;
- G *gp;
-
- if(iocphandle == INVALID_HANDLE_VALUE)
- return nil;
- gp = nil;
- wait = 0;
- if(block)
- wait = INFINITE;
-retry:
- if(runtime·GetQueuedCompletionStatusEx != nil) {
- n = nelem(entries) / runtime·gomaxprocs;
- if(n < 8)
- n = 8;
- if(block)
- g->m->blocked = true;
- if(runtime·stdcall6(runtime·GetQueuedCompletionStatusEx, iocphandle, (uintptr)entries, n, (uintptr)&n, wait, 0) == 0) {
- g->m->blocked = false;
- errno = runtime·getlasterror();
- if(!block && errno == WAIT_TIMEOUT)
- return nil;
- runtime·printf("netpoll: GetQueuedCompletionStatusEx failed (errno=%d)\n", errno);
- runtime·throw("netpoll: GetQueuedCompletionStatusEx failed");
- }
- g->m->blocked = false;
- for(i = 0; i < n; i++) {
- op = entries[i].op;
- errno = 0;
- qty = 0;
- if(runtime·stdcall5(runtime·WSAGetOverlappedResult, runtime·netpollfd(op->pd), (uintptr)op, (uintptr)&qty, 0, (uintptr)&flags) == 0)
- errno = runtime·getlasterror();
- handlecompletion(&gp, op, errno, qty);
- }
- } else {
- op = nil;
- errno = 0;
- qty = 0;
- if(block)
- g->m->blocked = true;
- if(runtime·stdcall5(runtime·GetQueuedCompletionStatus, iocphandle, (uintptr)&qty, (uintptr)&key, (uintptr)&op, wait) == 0) {
- g->m->blocked = false;
- errno = runtime·getlasterror();
- if(!block && errno == WAIT_TIMEOUT)
- return nil;
- if(op == nil) {
- runtime·printf("netpoll: GetQueuedCompletionStatus failed (errno=%d)\n", errno);
- runtime·throw("netpoll: GetQueuedCompletionStatus failed");
- }
- // dequeued failed IO packet, so report that
- }
- g->m->blocked = false;
- handlecompletion(&gp, op, errno, qty);
- }
- if(block && gp == nil)
- goto retry;
- return gp;
-}
-
-static void
-handlecompletion(G **gpp, net_op *op, int32 errno, uint32 qty)
-{
- int32 mode;
-
- if(op == nil)
- runtime·throw("netpoll: GetQueuedCompletionStatus returned op == nil");
- mode = op->mode;
- if(mode != 'r' && mode != 'w') {
- runtime·printf("netpoll: GetQueuedCompletionStatus returned invalid mode=%d\n", mode);
- runtime·throw("netpoll: GetQueuedCompletionStatus returned invalid mode");
- }
- op->errno = errno;
- op->qty = qty;
- runtime·netpollready(gpp, op->pd, mode);
-}
diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go
new file mode 100644
index 000000000..88e878137
--- /dev/null
+++ b/src/runtime/netpoll_windows.go
@@ -0,0 +1,156 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const _DWORD_MAX = 0xffffffff
+
+//go:cgo_import_dynamic runtime._CreateIoCompletionPort CreateIoCompletionPort "kernel32.dll"
+//go:cgo_import_dynamic runtime._GetQueuedCompletionStatus GetQueuedCompletionStatus "kernel32.dll"
+//go:cgo_import_dynamic runtime._WSAGetOverlappedResult WSAGetOverlappedResult "ws2_32.dll"
+
+var (
+ _CreateIoCompletionPort,
+ _GetQueuedCompletionStatus,
+ _WSAGetOverlappedResult stdFunction
+)
+
+const _INVALID_HANDLE_VALUE = ^uintptr(0)
+
+// net_op must be the same as the beginning of net.operation. Keep these in sync.
+type net_op struct {
+ // used by windows
+ o overlapped
+ // used by netpoll
+ pd *pollDesc
+ mode int32
+ errno int32
+ qty uint32
+}
+
+type overlappedEntry struct {
+ key uintptr
+ op *net_op // In reality it's *overlapped, but we cast it to *net_op anyway.
+ internal uintptr
+ qty uint32
+}
+
+var iocphandle uintptr = _INVALID_HANDLE_VALUE // completion port io handle
+
+func netpollinit() {
+ iocphandle = uintptr(stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX))
+ if iocphandle == 0 {
+ println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
+ gothrow("netpoll: failed to create iocp handle")
+ }
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ if stdcall4(_CreateIoCompletionPort, fd, iocphandle, 0, 0) == 0 {
+ return -int32(getlasterror())
+ }
+ return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+ // nothing to do
+ return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+ gothrow("unused")
+}
+
+// Polls for completed network IO.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) *g {
+ var entries [64]overlappedEntry
+ var wait, qty, key, flags, n, i uint32
+ var errno int32
+ var op *net_op
+ var gp *g
+
+ mp := getg().m
+
+ if iocphandle == _INVALID_HANDLE_VALUE {
+ return nil
+ }
+ gp = nil
+ wait = 0
+ if block {
+ wait = _INFINITE
+ }
+retry:
+ if _GetQueuedCompletionStatusEx != nil {
+ n = uint32(len(entries) / int(gomaxprocs))
+ if n < 8 {
+ n = 8
+ }
+ if block {
+ mp.blocked = true
+ }
+ if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
+ mp.blocked = false
+ errno = int32(getlasterror())
+ if !block && errno == _WAIT_TIMEOUT {
+ return nil
+ }
+ println("netpoll: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
+ gothrow("netpoll: GetQueuedCompletionStatusEx failed")
+ }
+ mp.blocked = false
+ for i = 0; i < n; i++ {
+ op = entries[i].op
+ errno = 0
+ qty = 0
+ if stdcall5(_WSAGetOverlappedResult, netpollfd(op.pd), uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 {
+ errno = int32(getlasterror())
+ }
+ handlecompletion(&gp, op, errno, qty)
+ }
+ } else {
+ op = nil
+ errno = 0
+ qty = 0
+ if block {
+ mp.blocked = true
+ }
+ if stdcall5(_GetQueuedCompletionStatus, iocphandle, uintptr(unsafe.Pointer(&qty)), uintptr(unsafe.Pointer(&key)), uintptr(unsafe.Pointer(&op)), uintptr(wait)) == 0 {
+ mp.blocked = false
+ errno = int32(getlasterror())
+ if !block && errno == _WAIT_TIMEOUT {
+ return nil
+ }
+ if op == nil {
+ println("netpoll: GetQueuedCompletionStatus failed (errno=", errno, ")")
+ gothrow("netpoll: GetQueuedCompletionStatus failed")
+ }
+ // dequeued failed IO packet, so report that
+ }
+ mp.blocked = false
+ handlecompletion(&gp, op, errno, qty)
+ }
+ if block && gp == nil {
+ goto retry
+ }
+ return gp
+}
+
+func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
+ if op == nil {
+ gothrow("netpoll: GetQueuedCompletionStatus returned op == nil")
+ }
+ mode := op.mode
+ if mode != 'r' && mode != 'w' {
+ println("netpoll: GetQueuedCompletionStatus returned invalid mode=", mode)
+ gothrow("netpoll: GetQueuedCompletionStatus returned invalid mode")
+ }
+ op.errno = errno
+ op.qty = qty
+ netpollready(gpp, op.pd, mode)
+}
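
The cast noted in the overlappedEntry comment works because the overlapped header is the first field of net_op, so a *overlapped handed back by the kernel is also the address of the enclosing op. A tiny illustration with stand-in types, not the runtime's real layouts:

package main

import (
	"fmt"
	"unsafe"
)

type overlapped struct{ internal, internalHigh uintptr }

type netOp struct {
	o     overlapped // must stay the first field for the cast to be valid
	mode  int32
	errno int32
	qty   uint32
}

func main() {
	op := &netOp{mode: 'r'}
	// The kernel reports a *overlapped; the poller recovers the enclosing op.
	o := (*overlapped)(unsafe.Pointer(op))
	back := (*netOp)(unsafe.Pointer(o))
	fmt.Println(back.mode == 'r', unsafe.Offsetof(op.o) == 0) // true true
}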
diff --git a/src/runtime/norace_test.go b/src/runtime/norace_test.go
index 3b171877a..3681bf190 100644
--- a/src/runtime/norace_test.go
+++ b/src/runtime/norace_test.go
@@ -34,12 +34,12 @@ func benchmarkSyscall(b *testing.B, work, excess int) {
b.RunParallel(func(pb *testing.PB) {
foo := 42
for pb.Next() {
- runtime.Entersyscall()
+ runtime.Entersyscall(0)
for i := 0; i < work; i++ {
foo *= 2
foo /= 2
}
- runtime.Exitsyscall()
+ runtime.Exitsyscall(0)
}
_ = foo
})
diff --git a/src/runtime/os1_darwin.go b/src/runtime/os1_darwin.go
new file mode 100644
index 000000000..2fbf2cae0
--- /dev/null
+++ b/src/runtime/os1_darwin.go
@@ -0,0 +1,423 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//extern SigTabTT runtime·sigtab[];
+
+var sigset_none = uint32(0)
+var sigset_all = ^uint32(0)
+
+func unimplemented(name string) {
+ println(name, "not implemented")
+ *(*int)(unsafe.Pointer(uintptr(1231))) = 1231
+}
+
+//go:nosplit
+func semawakeup(mp *m) {
+ mach_semrelease(uint32(mp.waitsema))
+}
+
+//go:nosplit
+func semacreate() uintptr {
+ var x uintptr
+ systemstack(func() {
+ x = uintptr(mach_semcreate())
+ })
+ return x
+}
+
+// BSD interface for threading.
+func osinit() {
+ // bsdthread_register delayed until end of goenvs so that we
+ // can look at the environment first.
+
+ // Use sysctl to fetch hw.ncpu.
+ mib := [2]uint32{6, 3}
+ out := uint32(0)
+ nout := unsafe.Sizeof(out)
+ ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ if ret >= 0 {
+ ncpu = int32(out)
+ }
+}
+
+var urandom_data [_HashRandomBytes]byte
+var urandom_dev = []byte("/dev/random\x00")
+
+//go:nosplit
+func get_random_data(rnd *unsafe.Pointer, rnd_len *int32) {
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ if read(fd, unsafe.Pointer(&urandom_data), _HashRandomBytes) == _HashRandomBytes {
+ *rnd = unsafe.Pointer(&urandom_data[0])
+ *rnd_len = _HashRandomBytes
+ } else {
+ *rnd = nil
+ *rnd_len = 0
+ }
+ close(fd)
+}
+
+func goenvs() {
+ goenvs_unix()
+
+ // Register our thread-creation callback (see sys_darwin_{amd64,386}.s)
+ // but only if we're not using cgo. If we are using cgo we need
+ // to let the C pthread library install its own thread-creation callback.
+ if !iscgo {
+ if bsdthread_register() != 0 {
+ if gogetenv("DYLD_INSERT_LIBRARIES") != "" {
+ gothrow("runtime: bsdthread_register error (unset DYLD_INSERT_LIBRARIES)")
+ }
+ gothrow("runtime: bsdthread_register error")
+ }
+ }
+}
+
+func newosproc(mp *m, stk unsafe.Pointer) {
+ mp.tls[0] = uintptr(mp.id) // so 386 asm can find it
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, "/", int(mp.tls[0]), " ostk=", &mp, "\n")
+ }
+
+ var oset uint32
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ errno := bsdthread_create(stk, mp, mp.g0, funcPC(mstart))
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+
+ if errno < 0 {
+ print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -errno, ")\n")
+ gothrow("runtime.newosproc")
+ }
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
+ mp.gsignal.m = mp
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+func minit() {
+ // Initialize signal handling.
+ _g_ := getg()
+ signalstack((*byte)(unsafe.Pointer(_g_.m.gsignal.stack.lo)), 32*1024)
+ sigprocmask(_SIG_SETMASK, &sigset_none, nil)
+}
+
+// Called from dropm to undo the effect of an minit.
+func unminit() {
+ signalstack(nil, 0)
+}
+
+// Mach IPC, to get at semaphores
+// Definitions are in /usr/include/mach on a Mac.
+
+func macherror(r int32, fn string) {
+ print("mach error ", fn, ": ", r, "\n")
+ gothrow("mach error")
+}
+
+const _DebugMach = false
+
+var zerondr machndr
+
+func mach_msgh_bits(a, b uint32) uint32 {
+ return a | b<<8
+}
+
+func mach_msg(h *machheader, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32 {
+ // TODO: Loop on interrupt.
+ return mach_msg_trap(unsafe.Pointer(h), op, send_size, rcv_size, rcv_name, timeout, notify)
+}
+
+// Mach RPC (MIG)
+const (
+ _MinMachMsg = 48
+ _MachReply = 100
+)
+
+type codemsg struct {
+ h machheader
+ ndr machndr
+ code int32
+}
+
+func machcall(h *machheader, maxsize int32, rxsize int32) int32 {
+ _g_ := getg()
+ port := _g_.m.machport
+ if port == 0 {
+ port = mach_reply_port()
+ _g_.m.machport = port
+ }
+
+ h.msgh_bits |= mach_msgh_bits(_MACH_MSG_TYPE_COPY_SEND, _MACH_MSG_TYPE_MAKE_SEND_ONCE)
+ h.msgh_local_port = port
+ h.msgh_reserved = 0
+ id := h.msgh_id
+
+ if _DebugMach {
+ p := (*[10000]unsafe.Pointer)(unsafe.Pointer(h))
+ print("send:\t")
+ var i uint32
+ for i = 0; i < h.msgh_size/uint32(unsafe.Sizeof(p[0])); i++ {
+ print(" ", p[i])
+ if i%8 == 7 {
+ print("\n\t")
+ }
+ }
+ if i%8 != 0 {
+ print("\n")
+ }
+ }
+ ret := mach_msg(h, _MACH_SEND_MSG|_MACH_RCV_MSG, h.msgh_size, uint32(maxsize), port, 0, 0)
+ if ret != 0 {
+ if _DebugMach {
+ print("mach_msg error ", ret, "\n")
+ }
+ return ret
+ }
+ if _DebugMach {
+ p := (*[10000]unsafe.Pointer)(unsafe.Pointer(h))
+ var i uint32
+ for i = 0; i < h.msgh_size/uint32(unsafe.Sizeof(p[0])); i++ {
+ print(" ", p[i])
+ if i%8 == 7 {
+ print("\n\t")
+ }
+ }
+ if i%8 != 0 {
+ print("\n")
+ }
+ }
+ if h.msgh_id != id+_MachReply {
+ if _DebugMach {
+ print("mach_msg _MachReply id mismatch ", h.msgh_id, " != ", id+_MachReply, "\n")
+ }
+ return -303 // MIG_REPLY_MISMATCH
+ }
+ // Look for a response giving the return value.
+ // Any call can send this back with an error,
+ // and some calls only have return values so they
+ // send it back on success too. I don't quite see how
+ // you know it's one of these and not the full response
+ // format, so just check whether the message looks right.
+ c := (*codemsg)(unsafe.Pointer(h))
+ if uintptr(h.msgh_size) == unsafe.Sizeof(*c) && h.msgh_bits&_MACH_MSGH_BITS_COMPLEX == 0 {
+ if _DebugMach {
+ print("mig result ", c.code, "\n")
+ }
+ return c.code
+ }
+ if h.msgh_size != uint32(rxsize) {
+ if _DebugMach {
+ print("mach_msg _MachReply size mismatch ", h.msgh_size, " != ", rxsize, "\n")
+ }
+ return -307 // MIG_ARRAY_TOO_LARGE
+ }
+ return 0
+}
+
+// Semaphores!
+
+const (
+ tmach_semcreate = 3418
+ rmach_semcreate = tmach_semcreate + _MachReply
+
+ tmach_semdestroy = 3419
+ rmach_semdestroy = tmach_semdestroy + _MachReply
+
+ _KERN_ABORTED = 14
+ _KERN_OPERATION_TIMED_OUT = 49
+)
+
+type tmach_semcreatemsg struct {
+ h machheader
+ ndr machndr
+ policy int32
+ value int32
+}
+
+type rmach_semcreatemsg struct {
+ h machheader
+ body machbody
+ semaphore machport
+}
+
+type tmach_semdestroymsg struct {
+ h machheader
+ body machbody
+ semaphore machport
+}
+
+func mach_semcreate() uint32 {
+ var m [256]uint8
+ tx := (*tmach_semcreatemsg)(unsafe.Pointer(&m))
+ rx := (*rmach_semcreatemsg)(unsafe.Pointer(&m))
+
+ tx.h.msgh_bits = 0
+ tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
+ tx.h.msgh_remote_port = mach_task_self()
+ tx.h.msgh_id = tmach_semcreate
+ tx.ndr = zerondr
+
+ tx.policy = 0 // 0 = SYNC_POLICY_FIFO
+ tx.value = 0
+
+ for {
+ r := machcall(&tx.h, int32(unsafe.Sizeof(m)), int32(unsafe.Sizeof(*rx)))
+ if r == 0 {
+ break
+ }
+ if r == _KERN_ABORTED { // interrupted
+ continue
+ }
+ macherror(r, "semaphore_create")
+ }
+ if rx.body.msgh_descriptor_count != 1 {
+ unimplemented("mach_semcreate desc count")
+ }
+ return rx.semaphore.name
+}
+
+func mach_semdestroy(sem uint32) {
+ var m [256]uint8
+ tx := (*tmach_semdestroymsg)(unsafe.Pointer(&m))
+
+ tx.h.msgh_bits = _MACH_MSGH_BITS_COMPLEX
+ tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
+ tx.h.msgh_remote_port = mach_task_self()
+ tx.h.msgh_id = tmach_semdestroy
+ tx.body.msgh_descriptor_count = 1
+ tx.semaphore.name = sem
+ tx.semaphore.disposition = _MACH_MSG_TYPE_MOVE_SEND
+ tx.semaphore._type = 0
+
+ for {
+ r := machcall(&tx.h, int32(unsafe.Sizeof(m)), 0)
+ if r == 0 {
+ break
+ }
+ if r == _KERN_ABORTED { // interrupted
+ continue
+ }
+ macherror(r, "semaphore_destroy")
+ }
+}
+
+// The other calls have simple system call traps in sys_darwin_{amd64,386}.s
+
+func mach_semaphore_wait(sema uint32) int32
+func mach_semaphore_timedwait(sema, sec, nsec uint32) int32
+func mach_semaphore_signal(sema uint32) int32
+func mach_semaphore_signal_all(sema uint32) int32
+
+func semasleep1(ns int64) int32 {
+ _g_ := getg()
+
+ if ns >= 0 {
+ var nsecs int32
+ secs := timediv(ns, 1000000000, &nsecs)
+ r := mach_semaphore_timedwait(uint32(_g_.m.waitsema), uint32(secs), uint32(nsecs))
+ if r == _KERN_ABORTED || r == _KERN_OPERATION_TIMED_OUT {
+ return -1
+ }
+ if r != 0 {
+ macherror(r, "semaphore_wait")
+ }
+ return 0
+ }
+
+ for {
+ r := mach_semaphore_wait(uint32(_g_.m.waitsema))
+ if r == 0 {
+ break
+ }
+ if r == _KERN_ABORTED { // interrupted
+ continue
+ }
+ macherror(r, "semaphore_wait")
+ }
+ return 0
+}
+
+//go:nosplit
+func semasleep(ns int64) int32 {
+ var r int32
+ systemstack(func() {
+ r = semasleep1(ns)
+ })
+ return r
+}
+
+//go:nosplit
+func mach_semrelease(sem uint32) {
+ for {
+ r := mach_semaphore_signal(sem)
+ if r == 0 {
+ break
+ }
+ if r == _KERN_ABORTED { // interrupted
+ continue
+ }
+
+ // mach_semrelease must be completely nosplit,
+ // because it is called from Go code.
+ // If we're going to die, start that process on the system stack
+ // to avoid a Go stack split.
+ systemstack(func() { macherror(r, "semaphore_signal") })
+ }
+}
+
+//go:nosplit
+func osyield() {
+ usleep(1)
+}
+
+func memlimit() uintptr {
+ // NOTE(rsc): Could use getrlimit here,
+ // like on FreeBSD or Linux, but Darwin doesn't enforce
+ // ulimit -v, so it's unclear why we'd try to stay within
+ // the limit.
+ return 0
+}
+
+func setsig(i int32, fn uintptr, restart bool) {
+ var sa sigactiont
+ memclr(unsafe.Pointer(&sa), unsafe.Sizeof(sa))
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK
+ if restart {
+ sa.sa_flags |= _SA_RESTART
+ }
+ sa.sa_mask = ^uint32(0)
+ sa.sa_tramp = unsafe.Pointer(funcPC(sigtramp)) // runtime·sigtramp's job is to call into real handler
+ *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
+ sigaction(uint32(i), &sa, nil)
+}
+
+func getsig(i int32) uintptr {
+ var sa sigactiont
+ memclr(unsafe.Pointer(&sa), unsafe.Sizeof(sa))
+ sigaction(uint32(i), nil, &sa)
+ return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
+}
+
+func signalstack(p *byte, n int32) {
+ var st stackt
+ st.ss_sp = p
+ st.ss_size = uintptr(n)
+ st.ss_flags = 0
+ if p == nil {
+ st.ss_flags = _SS_DISABLE
+ }
+ sigaltstack(&st, nil)
+}
+
+func unblocksignals() {
+ sigprocmask(_SIG_SETMASK, &sigset_none, nil)
+}
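
For reference, the timeout handling in semasleep1 above just splits a relative nanosecond deadline into the (sec, nsec) pair that mach_semaphore_timedwait expects. A simplified stand-alone equivalent, assuming nothing beyond 64-bit integer division; the runtime's own timediv also avoids 64-bit division on 32-bit platforms and guards against overflow, which this sketch does not:

package main

import "fmt"

// splitTimeout breaks a relative nanosecond timeout into whole seconds
// and the remaining nanoseconds, as semasleep1 does via timediv.
func splitTimeout(ns int64) (sec int32, nsec int32) {
	return int32(ns / 1000000000), int32(ns % 1000000000)
}

func main() {
	sec, nsec := splitTimeout(1500000123)
	fmt.Println(sec, nsec) // 1 500000123
}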
diff --git a/src/runtime/os1_dragonfly.go b/src/runtime/os1_dragonfly.go
new file mode 100644
index 000000000..82bb45b9b
--- /dev/null
+++ b/src/runtime/os1_dragonfly.go
@@ -0,0 +1,220 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// From DragonFly's <sys/sysctl.h>
+const (
+ _CTL_HW = 6
+ _HW_NCPU = 3
+)
+
+var sigset_none = sigset{}
+var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
+
+func getncpu() int32 {
+ mib := [2]uint32{_CTL_HW, _HW_NCPU}
+ out := uint32(0)
+ nout := unsafe.Sizeof(out)
+ ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ if ret >= 0 {
+ return int32(out)
+ }
+ return 1
+}
+
+//go:nosplit
+func futexsleep(addr *uint32, val uint32, ns int64) {
+ systemstack(func() {
+ futexsleep1(addr, val, ns)
+ })
+}
+
+func futexsleep1(addr *uint32, val uint32, ns int64) {
+ var timeout int32
+ if ns >= 0 {
+ // The timeout is specified in microseconds - ensure that we
+ // do not end up dividing down to zero, which would put us to sleep
+ // indefinitely...
+ timeout = timediv(ns, 1000, nil)
+ if timeout == 0 {
+ timeout = 1
+ }
+ }
+
+ // sys_umtx_sleep will return EWOULDBLOCK (EAGAIN) when the timeout
+ // expires or EBUSY if the mutex value does not match.
+ ret := sys_umtx_sleep(addr, int32(val), timeout)
+ if ret >= 0 || ret == -_EINTR || ret == -_EAGAIN || ret == -_EBUSY {
+ return
+ }
+
+ print("umtx_sleep addr=", addr, " val=", val, " ret=", ret, "\n")
+ *(*int32)(unsafe.Pointer(uintptr(0x1005))) = 0x1005
+}
+
+//go:nosplit
+func futexwakeup(addr *uint32, cnt uint32) {
+ ret := sys_umtx_wakeup(addr, int32(cnt))
+ if ret >= 0 {
+ return
+ }
+
+ systemstack(func() {
+ print("umtx_wake_addr=", addr, " ret=", ret, "\n")
+ *(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
+ })
+}
+
+func lwp_start(uintptr)
+
+func newosproc(mp *m, stk unsafe.Pointer) {
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", funcPC(lwp_start), " id=", mp.id, "/", mp.tls[0], " ostk=", &mp, "\n")
+ }
+
+ var oset sigset
+ sigprocmask(&sigset_all, &oset)
+
+ params := lwpparams{
+ start_func: funcPC(lwp_start),
+ arg: unsafe.Pointer(mp),
+ stack: uintptr(stk),
+ tid1: unsafe.Pointer(&mp.procid),
+ tid2: nil,
+ }
+
+ mp.tls[0] = uintptr(mp.id) // so 386 asm can find it
+
+ lwp_create(&params)
+ sigprocmask(&oset, nil)
+}
+
+func osinit() {
+ ncpu = getncpu()
+}
+
+var urandom_data [_HashRandomBytes]byte
+var urandom_dev = []byte("/dev/urandom\x00")
+
+//go:nosplit
+func get_random_data(rnd *unsafe.Pointer, rnd_len *int32) {
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ if read(fd, unsafe.Pointer(&urandom_data), _HashRandomBytes) == _HashRandomBytes {
+ *rnd = unsafe.Pointer(&urandom_data[0])
+ *rnd_len = _HashRandomBytes
+ } else {
+ *rnd = nil
+ *rnd_len = 0
+ }
+ close(fd)
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024)
+ mp.gsignal.m = mp
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+func minit() {
+ _g_ := getg()
+
+ // m.procid is a uint64, but lwp_start writes an int32. Fix it up.
+ _g_.m.procid = uint64(*(*int32)(unsafe.Pointer(&_g_.m.procid)))
+
+ // Initialize signal handling
+ signalstack((*byte)(unsafe.Pointer(_g_.m.gsignal.stack.lo)), 32*1024)
+ sigprocmask(&sigset_none, nil)
+}
+
+// Called from dropm to undo the effect of an minit.
+func unminit() {
+ signalstack(nil, 0)
+}
+
+func memlimit() uintptr {
+ /*
+ TODO: Convert to Go when something actually uses the result.
+
+ Rlimit rl;
+ extern byte runtime·text[], runtime·end[];
+ uintptr used;
+
+ if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
+ return 0;
+ if(rl.rlim_cur >= 0x7fffffff)
+ return 0;
+
+ // Estimate our VM footprint excluding the heap.
+ // Not an exact science: use size of binary plus
+ // some room for thread stacks.
+ used = runtime·end - runtime·text + (64<<20);
+ if(used >= rl.rlim_cur)
+ return 0;
+
+ // If there's not at least 16 MB left, we're probably
+ // not going to be able to do much. Treat as no limit.
+ rl.rlim_cur -= used;
+ if(rl.rlim_cur < (16<<20))
+ return 0;
+
+ return rl.rlim_cur - used;
+ */
+ return 0
+}
+
+func sigtramp()
+
+type sigactiont struct {
+ sa_sigaction uintptr
+ sa_flags int32
+ sa_mask sigset
+}
+
+func setsig(i int32, fn uintptr, restart bool) {
+ var sa sigactiont
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK
+ if restart {
+ sa.sa_flags |= _SA_RESTART
+ }
+ sa.sa_mask = sigset_all
+ if fn == funcPC(sighandler) {
+ fn = funcPC(sigtramp)
+ }
+ sa.sa_sigaction = fn
+ sigaction(i, &sa, nil)
+}
+
+func getsig(i int32) uintptr {
+ var sa sigactiont
+ sigaction(i, nil, &sa)
+ if sa.sa_sigaction == funcPC(sigtramp) {
+ return funcPC(sighandler)
+ }
+ return sa.sa_sigaction
+}
+
+func signalstack(p *byte, n int32) {
+ var st sigaltstackt
+ st.ss_sp = uintptr(unsafe.Pointer(p))
+ st.ss_size = uintptr(n)
+ st.ss_flags = 0
+ if p == nil {
+ st.ss_flags = _SS_DISABLE
+ }
+ sigaltstack(&st, nil)
+}
+
+func unblocksignals() {
+ sigprocmask(&sigset_none, nil)
+}
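
The futexsleep/futexwakeup pair above follows the usual futex contract: sleep only if *addr still holds the expected value, so a wakeup that lands between the caller's check and the kernel call cannot be lost. A toy user-space model of that protection, with sync.Cond standing in for the kernel's wait queue; this is an illustration, not the runtime's lock_futex code:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type toyFutex struct {
	mu   sync.Mutex
	cond *sync.Cond
}

func newToyFutex() *toyFutex {
	f := &toyFutex{}
	f.cond = sync.NewCond(&f.mu)
	return f
}

// sleep blocks only while *addr still equals val, mirroring sys_umtx_sleep's
// "value must match" check done under the kernel's own lock.
func (f *toyFutex) sleep(addr *uint32, val uint32) {
	f.mu.Lock()
	for atomic.LoadUint32(addr) == val {
		f.cond.Wait()
	}
	f.mu.Unlock()
}

// wakeup publishes the new value and makes sleepers recheck, mirroring
// sys_umtx_wakeup.
func (f *toyFutex) wakeup(addr *uint32, newval uint32) {
	atomic.StoreUint32(addr, newval)
	f.mu.Lock()
	f.cond.Broadcast()
	f.mu.Unlock()
}

func main() {
	f := newToyFutex()
	var key uint32
	done := make(chan struct{})
	go func() {
		f.sleep(&key, 0) // parks while key == 0
		fmt.Println("woken, key =", atomic.LoadUint32(&key))
		close(done)
	}()
	f.wakeup(&key, 1)
	<-done
}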
diff --git a/src/runtime/os1_freebsd.go b/src/runtime/os1_freebsd.go
new file mode 100644
index 000000000..2cacfbae6
--- /dev/null
+++ b/src/runtime/os1_freebsd.go
@@ -0,0 +1,221 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// From FreeBSD's <sys/sysctl.h>
+const (
+ _CTL_HW = 6
+ _HW_NCPU = 3
+)
+
+var sigset_none = sigset{}
+var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
+
+func getncpu() int32 {
+ mib := [2]uint32{_CTL_HW, _HW_NCPU}
+ out := uint32(0)
+ nout := unsafe.Sizeof(out)
+ ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ if ret >= 0 {
+ return int32(out)
+ }
+ return 1
+}
+
+// FreeBSD's umtx_op syscall is effectively the same as Linux's futex, and
+// thus the code is largely similar. See Linux implementation
+// and lock_futex.c for comments.
+
+//go:nosplit
+func futexsleep(addr *uint32, val uint32, ns int64) {
+ systemstack(func() {
+ futexsleep1(addr, val, ns)
+ })
+}
+
+func futexsleep1(addr *uint32, val uint32, ns int64) {
+ var tsp *timespec
+ if ns >= 0 {
+ var ts timespec
+ ts.tv_nsec = 0
+ ts.set_sec(int64(timediv(ns, 1000000000, (*int32)(unsafe.Pointer(&ts.tv_nsec)))))
+ tsp = &ts
+ }
+ ret := sys_umtx_op(addr, _UMTX_OP_WAIT_UINT_PRIVATE, val, nil, tsp)
+ if ret >= 0 || ret == -_EINTR {
+ return
+ }
+ print("umtx_wait addr=", addr, " val=", val, " ret=", ret, "\n")
+ *(*int32)(unsafe.Pointer(uintptr(0x1005))) = 0x1005
+}
+
+//go:nosplit
+func futexwakeup(addr *uint32, cnt uint32) {
+ ret := sys_umtx_op(addr, _UMTX_OP_WAKE_PRIVATE, cnt, nil, nil)
+ if ret >= 0 {
+ return
+ }
+
+ systemstack(func() {
+ print("umtx_wake_addr=", addr, " ret=", ret, "\n")
+ })
+}
+
+func thr_start()
+
+func newosproc(mp *m, stk unsafe.Pointer) {
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", funcPC(thr_start), " id=", mp.id, "/", mp.tls[0], " ostk=", &mp, "\n")
+ }
+
+ // NOTE(rsc): This code is confused. stackbase is the top of the stack
+ // and is equal to stk. However, it's working, so I'm not changing it.
+ param := thrparam{
+ start_func: funcPC(thr_start),
+ arg: unsafe.Pointer(mp),
+ stack_base: mp.g0.stack.hi,
+ stack_size: uintptr(stk) - mp.g0.stack.hi,
+ child_tid: unsafe.Pointer(&mp.procid),
+ parent_tid: nil,
+ tls_base: unsafe.Pointer(&mp.tls[0]),
+ tls_size: unsafe.Sizeof(mp.tls),
+ }
+ mp.tls[0] = uintptr(mp.id) // so 386 asm can find it
+
+ var oset sigset
+ sigprocmask(&sigset_all, &oset)
+ thr_new(&param, int32(unsafe.Sizeof(param)))
+ sigprocmask(&oset, nil)
+}
+
+func osinit() {
+ ncpu = getncpu()
+}
+
+var urandom_data [_HashRandomBytes]byte
+var urandom_dev = []byte("/dev/random\x00")
+
+//go:nosplit
+func get_random_data(rnd *unsafe.Pointer, rnd_len *int32) {
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ if read(fd, unsafe.Pointer(&urandom_data), _HashRandomBytes) == _HashRandomBytes {
+ *rnd = unsafe.Pointer(&urandom_data[0])
+ *rnd_len = _HashRandomBytes
+ } else {
+ *rnd = nil
+ *rnd_len = 0
+ }
+ close(fd)
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024)
+ mp.gsignal.m = mp
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+func minit() {
+ _g_ := getg()
+
+ // m.procid is a uint64, but thr_new writes a uint32 on 32-bit systems.
+ // Fix it up. (Only matters on big-endian, but be clean anyway.)
+ if ptrSize == 4 {
+ _g_.m.procid = uint64(*(*uint32)(unsafe.Pointer(&_g_.m.procid)))
+ }
+
+ // Initialize signal handling.
+ signalstack((*byte)(unsafe.Pointer(_g_.m.gsignal.stack.lo)), 32*1024)
+ sigprocmask(&sigset_none, nil)
+}
+
+// Called from dropm to undo the effect of an minit.
+func unminit() {
+ signalstack(nil, 0)
+}
+
+func memlimit() uintptr {
+ /*
+ TODO: Convert to Go when something actually uses the result.
+ Rlimit rl;
+ extern byte runtime·text[], runtime·end[];
+ uintptr used;
+
+ if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
+ return 0;
+ if(rl.rlim_cur >= 0x7fffffff)
+ return 0;
+
+ // Estimate our VM footprint excluding the heap.
+ // Not an exact science: use size of binary plus
+ // some room for thread stacks.
+ used = runtime·end - runtime·text + (64<<20);
+ if(used >= rl.rlim_cur)
+ return 0;
+
+ // If there's not at least 16 MB left, we're probably
+ // not going to be able to do much. Treat as no limit.
+ rl.rlim_cur -= used;
+ if(rl.rlim_cur < (16<<20))
+ return 0;
+
+ return rl.rlim_cur - used;
+ */
+
+ return 0
+}
+
+func sigtramp()
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags int32
+ sa_mask sigset
+}
+
+func setsig(i int32, fn uintptr, restart bool) {
+ var sa sigactiont
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK
+ if restart {
+ sa.sa_flags |= _SA_RESTART
+ }
+ sa.sa_mask = sigset_all
+ if fn == funcPC(sighandler) {
+ fn = funcPC(sigtramp)
+ }
+ sa.sa_handler = fn
+ sigaction(i, &sa, nil)
+}
+func getsig(i int32) uintptr {
+ var sa sigactiont
+ sigaction(i, nil, &sa)
+ if sa.sa_handler == funcPC(sigtramp) {
+ return funcPC(sighandler)
+ }
+ return sa.sa_handler
+}
+
+func signalstack(p *byte, n int32) {
+ var st stackt
+ st.ss_sp = uintptr(unsafe.Pointer(p))
+ st.ss_size = uintptr(n)
+ st.ss_flags = 0
+ if p == nil {
+ st.ss_flags = _SS_DISABLE
+ }
+ sigaltstack(&st, nil)
+}
+
+func unblocksignals() {
+ sigprocmask(&sigset_none, nil)
+}
diff --git a/src/runtime/os1_linux.go b/src/runtime/os1_linux.go
new file mode 100644
index 000000000..67fa6391e
--- /dev/null
+++ b/src/runtime/os1_linux.go
@@ -0,0 +1,287 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+var sigset_none sigset
+var sigset_all sigset = sigset{^uint32(0), ^uint32(0)}
+
+// Linux futex.
+//
+// futexsleep(uint32 *addr, uint32 val)
+// futexwakeup(uint32 *addr)
+//
+// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
+// Futexwakeup wakes up threads sleeping on addr.
+// Futexsleep is allowed to wake up spuriously.
+
+const (
+ _FUTEX_WAIT = 0
+ _FUTEX_WAKE = 1
+)
+
+// Atomically,
+// if(*addr == val) sleep
+// Might be woken up spuriously; that's allowed.
+// Don't sleep longer than ns; ns < 0 means forever.
+//go:nosplit
+func futexsleep(addr *uint32, val uint32, ns int64) {
+ var ts timespec
+
+ // Some Linux kernels have a bug where futex of
+ // FUTEX_WAIT returns an internal error code
+ // as an errno. Libpthread ignores the return value
+ // here, and so can we: as it says a few lines up,
+ // spurious wakeups are allowed.
+ if ns < 0 {
+ futex(unsafe.Pointer(addr), _FUTEX_WAIT, val, nil, nil, 0)
+ return
+ }
+
+ // It's difficult to live within the no-split stack limits here.
+ // On ARM and 386, a 64-bit divide invokes a general software routine
+ // that needs more stack than we can afford. So we use timediv instead.
+ // But on real 64-bit systems, where words are larger but the stack limit
+ // is not, even timediv is too heavy, and we really need to use just an
+ // ordinary machine instruction.
+ if ptrSize == 8 {
+ ts.set_sec(ns / 1000000000)
+ ts.set_nsec(int32(ns % 1000000000))
+ } else {
+ ts.tv_nsec = 0
+ ts.set_sec(int64(timediv(ns, 1000000000, (*int32)(unsafe.Pointer(&ts.tv_nsec)))))
+ }
+ futex(unsafe.Pointer(addr), _FUTEX_WAIT, val, unsafe.Pointer(&ts), nil, 0)
+}
+
+// If any procs are sleeping on addr, wake up at most cnt.
+//go:nosplit
+func futexwakeup(addr *uint32, cnt uint32) {
+ ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE, cnt, nil, nil, 0)
+ if ret >= 0 {
+ return
+ }
+
+ // I don't know that futex wakeup can return
+ // EAGAIN or EINTR, but if it does, it would be
+ // safe to loop and call futex again.
+ systemstack(func() {
+ print("futexwakeup addr=", addr, " returned ", ret, "\n")
+ })
+
+ *(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
+}
+
+func getproccount() int32 {
+ var buf [16]uintptr
+ r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
+ n := int32(0)
+ for _, v := range buf[:r/ptrSize] {
+ for i := 0; i < 64; i++ {
+ n += int32(v & 1)
+ v >>= 1
+ }
+ }
+ if n == 0 {
+ n = 1
+ }
+ return n
+}
+
+// Clone, the Linux rfork.
+const (
+ _CLONE_VM = 0x100
+ _CLONE_FS = 0x200
+ _CLONE_FILES = 0x400
+ _CLONE_SIGHAND = 0x800
+ _CLONE_PTRACE = 0x2000
+ _CLONE_VFORK = 0x4000
+ _CLONE_PARENT = 0x8000
+ _CLONE_THREAD = 0x10000
+ _CLONE_NEWNS = 0x20000
+ _CLONE_SYSVSEM = 0x40000
+ _CLONE_SETTLS = 0x80000
+ _CLONE_PARENT_SETTID = 0x100000
+ _CLONE_CHILD_CLEARTID = 0x200000
+ _CLONE_UNTRACED = 0x800000
+ _CLONE_CHILD_SETTID = 0x1000000
+ _CLONE_STOPPED = 0x2000000
+ _CLONE_NEWUTS = 0x4000000
+ _CLONE_NEWIPC = 0x8000000
+)
+
+func newosproc(mp *m, stk unsafe.Pointer) {
+ /*
+ * note: strace gets confused if we use CLONE_PTRACE here.
+ */
+ var flags int32 = _CLONE_VM | /* share memory */
+ _CLONE_FS | /* share cwd, etc */
+ _CLONE_FILES | /* share fd table */
+ _CLONE_SIGHAND | /* share sig handler table */
+ _CLONE_THREAD /* revisit - okay for now */
+
+ mp.tls[0] = uintptr(mp.id) // so 386 asm can find it
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", funcPC(clone), " id=", mp.id, "/", mp.tls[0], " ostk=", &mp, "\n")
+ }
+
+ // Disable signals during clone, so that the new thread starts
+ // with signals disabled. It will enable them in minit.
+ var oset sigset
+ rtsigprocmask(_SIG_SETMASK, &sigset_all, &oset, int32(unsafe.Sizeof(oset)))
+ ret := clone(flags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart)))
+ rtsigprocmask(_SIG_SETMASK, &oset, nil, int32(unsafe.Sizeof(oset)))
+
+ if ret < 0 {
+ print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
+ gothrow("newosproc")
+ }
+}
+
+func osinit() {
+ ncpu = getproccount()
+}
+
+// Random bytes initialized at startup. These come
+// from the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.c).
+// byte* runtime·startup_random_data;
+// uint32 runtime·startup_random_data_len;
+
+var urandom_data [_HashRandomBytes]byte
+var urandom_dev = []byte("/dev/random\x00")
+
+//go:nosplit
+func get_random_data(rnd *unsafe.Pointer, rnd_len *int32) {
+ if startup_random_data != nil {
+ *rnd = unsafe.Pointer(startup_random_data)
+ *rnd_len = int32(startup_random_data_len)
+ return
+ }
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ if read(fd, unsafe.Pointer(&urandom_data), _HashRandomBytes) == _HashRandomBytes {
+ *rnd = unsafe.Pointer(&urandom_data[0])
+ *rnd_len = _HashRandomBytes
+ } else {
+ *rnd = nil
+ *rnd_len = 0
+ }
+ close(fd)
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
+ mp.gsignal.m = mp
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+func minit() {
+ // Initialize signal handling.
+ _g_ := getg()
+ signalstack((*byte)(unsafe.Pointer(_g_.m.gsignal.stack.lo)), 32*1024)
+ rtsigprocmask(_SIG_SETMASK, &sigset_none, nil, int32(unsafe.Sizeof(sigset_none)))
+}
+
+// Called from dropm to undo the effect of an minit.
+func unminit() {
+ signalstack(nil, 0)
+}
+
+func memlimit() uintptr {
+ /*
+ TODO: Convert to Go when something actually uses the result.
+
+ Rlimit rl;
+ extern byte runtime·text[], runtime·end[];
+ uintptr used;
+
+ if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
+ return 0;
+ if(rl.rlim_cur >= 0x7fffffff)
+ return 0;
+
+ // Estimate our VM footprint excluding the heap.
+ // Not an exact science: use size of binary plus
+ // some room for thread stacks.
+ used = runtime·end - runtime·text + (64<<20);
+ if(used >= rl.rlim_cur)
+ return 0;
+
+ // If there's not at least 16 MB left, we're probably
+ // not going to be able to do much. Treat as no limit.
+ rl.rlim_cur -= used;
+ if(rl.rlim_cur < (16<<20))
+ return 0;
+
+ return rl.rlim_cur - used;
+ */
+
+ return 0
+}
+
+//#ifdef GOARCH_386
+//#define sa_handler k_sa_handler
+//#endif
+
+func sigreturn()
+func sigtramp()
+
+func setsig(i int32, fn uintptr, restart bool) {
+ var sa sigactiont
+ memclr(unsafe.Pointer(&sa), unsafe.Sizeof(sa))
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTORER
+ if restart {
+ sa.sa_flags |= _SA_RESTART
+ }
+ sa.sa_mask = ^uint64(0)
+ // Although the Linux manpage says the sa_restorer element is obsolete
+ // and should not be used, the x86_64 kernel requires it. Only use it
+ // on x86.
+ if GOARCH == "386" || GOARCH == "amd64" {
+ sa.sa_restorer = funcPC(sigreturn)
+ }
+ if fn == funcPC(sighandler) {
+ fn = funcPC(sigtramp)
+ }
+ sa.sa_handler = fn
+ if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
+ gothrow("rt_sigaction failure")
+ }
+}
+
+func getsig(i int32) uintptr {
+ var sa sigactiont
+
+ memclr(unsafe.Pointer(&sa), unsafe.Sizeof(sa))
+ if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
+ gothrow("rt_sigaction read failure")
+ }
+ if sa.sa_handler == funcPC(sigtramp) {
+ return funcPC(sighandler)
+ }
+ return sa.sa_handler
+}
+
+func signalstack(p *byte, n int32) {
+ var st sigaltstackt
+ st.ss_sp = p
+ st.ss_size = uintptr(n)
+ st.ss_flags = 0
+ if p == nil {
+ st.ss_flags = _SS_DISABLE
+ }
+ sigaltstack(&st, nil)
+}
+
+func unblocksignals() {
+ rtsigprocmask(_SIG_SETMASK, &sigset_none, nil, int32(unsafe.Sizeof(sigset_none)))
+}
diff --git a/src/runtime/os1_openbsd.go b/src/runtime/os1_openbsd.go
new file mode 100644
index 000000000..d5ffe10a8
--- /dev/null
+++ b/src/runtime/os1_openbsd.go
@@ -0,0 +1,235 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+const (
+ ESRCH = 3
+ EAGAIN = 35
+ EWOULDBLOCK = EAGAIN
+ ENOTSUP = 91
+
+ // From OpenBSD's sys/time.h
+ CLOCK_REALTIME = 0
+ CLOCK_VIRTUAL = 1
+ CLOCK_PROF = 2
+ CLOCK_MONOTONIC = 3
+)
+
+var sigset_none = uint32(0)
+var sigset_all = ^sigset_none
+
+// From OpenBSD's <sys/sysctl.h>
+const (
+ CTL_HW = 6
+ HW_NCPU = 3
+)
+
+func getncpu() int32 {
+ mib := [2]uint32{CTL_HW, HW_NCPU}
+ out := uint32(0)
+ nout := unsafe.Sizeof(out)
+
+ // Fetch hw.ncpu via sysctl.
+ ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
+ if ret >= 0 {
+ return int32(out)
+ }
+ return 1
+}
+
+//go:nosplit
+func semacreate() uintptr {
+ return 1
+}
+
+//go:nosplit
+func semasleep(ns int64) int32 {
+ _g_ := getg()
+
+ // Compute sleep deadline.
+ var tsp *timespec
+ if ns >= 0 {
+ var ts timespec
+ var nsec int32
+ ns += nanotime()
+ ts.set_sec(int64(timediv(ns, 1000000000, &nsec)))
+ ts.set_nsec(nsec)
+ tsp = &ts
+ }
+
+ for {
+ // spin-mutex lock
+ for {
+ if xchg(&_g_.m.waitsemalock, 1) == 0 {
+ break
+ }
+ osyield()
+ }
+
+ if _g_.m.waitsemacount != 0 {
+ // semaphore is available.
+ _g_.m.waitsemacount--
+ // spin-mutex unlock
+ atomicstore(&_g_.m.waitsemalock, 0)
+ return 0 // semaphore acquired
+ }
+
+ // sleep until semaphore != 0 or timeout.
+ // thrsleep unlocks m.waitsemalock.
+ ret := thrsleep((uintptr)(unsafe.Pointer(&_g_.m.waitsemacount)), CLOCK_MONOTONIC, tsp, (uintptr)(unsafe.Pointer(&_g_.m.waitsemalock)), (*int32)(unsafe.Pointer(&_g_.m.waitsemacount)))
+ if ret == EWOULDBLOCK {
+ return -1
+ }
+ }
+}
+
+//go:nosplit
+func semawakeup(mp *m) {
+ // spin-mutex lock
+ for {
+ if xchg(&mp.waitsemalock, 1) == 0 {
+ break
+ }
+ osyield()
+ }
+ mp.waitsemacount++
+ ret := thrwakeup(uintptr(unsafe.Pointer(&mp.waitsemacount)), 1)
+ if ret != 0 && ret != ESRCH {
+ // semawakeup can be called on signal stack.
+ systemstack(func() {
+ print("thrwakeup addr=", &mp.waitsemacount, " sem=", mp.waitsemacount, " ret=", ret, "\n")
+ })
+ }
+ // spin-mutex unlock
+ atomicstore(&mp.waitsemalock, 0)
+}
+
+func newosproc(mp *m, stk unsafe.Pointer) {
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, "/", int32(mp.tls[0]), " ostk=", &mp, "\n")
+ }
+
+ mp.tls[0] = uintptr(mp.id) // so 386 asm can find it
+
+ param := tforkt{
+ tf_tcb: unsafe.Pointer(&mp.tls[0]),
+ tf_tid: (*int32)(unsafe.Pointer(&mp.procid)),
+ tf_stack: uintptr(stk),
+ }
+
+ oset := sigprocmask(_SIG_SETMASK, sigset_all)
+ ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, funcPC(mstart))
+ sigprocmask(_SIG_SETMASK, oset)
+
+ if ret < 0 {
+ print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
+ if ret == -ENOTSUP {
+ print("runtime: is kern.rthreads disabled?\n")
+ }
+ gothrow("runtime.newosproc")
+ }
+}
+
+func osinit() {
+ ncpu = getncpu()
+}
+
+var urandom_data [_HashRandomBytes]byte
+var urandom_dev = []byte("/dev/urandom\x00")
+
+//go:nosplit
+func get_random_data(rnd *unsafe.Pointer, rnd_len *int32) {
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ if read(fd, unsafe.Pointer(&urandom_data), _HashRandomBytes) == _HashRandomBytes {
+ *rnd = unsafe.Pointer(&urandom_data[0])
+ *rnd_len = _HashRandomBytes
+ } else {
+ *rnd = nil
+ *rnd_len = 0
+ }
+ close(fd)
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024)
+ mp.gsignal.m = mp
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+func minit() {
+ _g_ := getg()
+
+ // m.procid is a uint64, but tfork writes an int32. Fix it up.
+ _g_.m.procid = uint64(*(*int32)(unsafe.Pointer(&_g_.m.procid)))
+
+ // Initialize signal handling
+ signalstack((*byte)(unsafe.Pointer(_g_.m.gsignal.stack.lo)), 32*1024)
+ sigprocmask(_SIG_SETMASK, sigset_none)
+}
+
+// Called from dropm to undo the effect of an minit.
+func unminit() {
+ signalstack(nil, 0)
+}
+
+func memlimit() uintptr {
+ return 0
+}
+
+func sigtramp()
+
+type sigactiont struct {
+ sa_sigaction uintptr
+ sa_mask uint32
+ sa_flags int32
+}
+
+func setsig(i int32, fn uintptr, restart bool) {
+ var sa sigactiont
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK
+ if restart {
+ sa.sa_flags |= _SA_RESTART
+ }
+ sa.sa_mask = sigset_all
+ if fn == funcPC(sighandler) {
+ fn = funcPC(sigtramp)
+ }
+ sa.sa_sigaction = fn
+ sigaction(i, &sa, nil)
+}
+
+func getsig(i int32) uintptr {
+ var sa sigactiont
+ sigaction(i, nil, &sa)
+ if sa.sa_sigaction == funcPC(sigtramp) {
+ return funcPC(sighandler)
+ }
+ return sa.sa_sigaction
+}
+
+func signalstack(p *byte, n int32) {
+ var st stackt
+
+ st.ss_sp = uintptr(unsafe.Pointer(p))
+ st.ss_size = uintptr(n)
+ st.ss_flags = 0
+ if p == nil {
+ st.ss_flags = _SS_DISABLE
+ }
+ sigaltstack(&st, nil)
+}
+
+func unblocksignals() {
+ sigprocmask(_SIG_SETMASK, sigset_none)
+}
diff --git a/src/runtime/os2_darwin.go b/src/runtime/os2_darwin.go
new file mode 100644
index 000000000..542bd7421
--- /dev/null
+++ b/src/runtime/os2_darwin.go
@@ -0,0 +1,14 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _NSIG = 32
+ _SI_USER = 0 /* empirically true, but not what headers say */
+ _SIG_BLOCK = 1
+ _SIG_UNBLOCK = 2
+ _SIG_SETMASK = 3
+ _SS_DISABLE = 4
+)
diff --git a/src/runtime/os2_dragonfly.go b/src/runtime/os2_dragonfly.go
new file mode 100644
index 000000000..0a20ed43f
--- /dev/null
+++ b/src/runtime/os2_dragonfly.go
@@ -0,0 +1,12 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _NSIG = 33
+ _SI_USER = 0x10001
+ _SS_DISABLE = 4
+ _RLIMIT_AS = 10
+)
diff --git a/src/runtime/os2_freebsd.go b/src/runtime/os2_freebsd.go
new file mode 100644
index 000000000..f67211fdf
--- /dev/null
+++ b/src/runtime/os2_freebsd.go
@@ -0,0 +1,12 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _SS_DISABLE = 4
+ _NSIG = 33
+ _SI_USER = 0x10001
+ _RLIMIT_AS = 10
+)
diff --git a/src/runtime/os2_linux.go b/src/runtime/os2_linux.go
new file mode 100644
index 000000000..eaa9f0e83
--- /dev/null
+++ b/src/runtime/os2_linux.go
@@ -0,0 +1,23 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_SETMASK = 2
+ _RLIMIT_AS = 9
+)
+
+// It's hard to tease out exactly how big a Sigset is, but
+// rt_sigprocmask crashes if we get it wrong, so if binaries
+// are running, this is right.
+type sigset [2]uint32
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
diff --git a/src/runtime/os2_openbsd.go b/src/runtime/os2_openbsd.go
new file mode 100644
index 000000000..1e785ad51
--- /dev/null
+++ b/src/runtime/os2_openbsd.go
@@ -0,0 +1,14 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _SS_DISABLE = 4
+ _SIG_BLOCK = 1
+ _SIG_UNBLOCK = 2
+ _SIG_SETMASK = 3
+ _NSIG = 33
+ _SI_USER = 0
+)
diff --git a/src/runtime/os2_solaris.go b/src/runtime/os2_solaris.go
new file mode 100644
index 000000000..26ca15f62
--- /dev/null
+++ b/src/runtime/os2_solaris.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _SIG_SETMASK = 3
+ _NSIG = 73 /* number of signals in sigtable array */
+ _SI_USER = 0
+ _RLIMIT_AS = 10
+)
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
new file mode 100644
index 000000000..1df74faad
--- /dev/null
+++ b/src/runtime/os3_solaris.go
@@ -0,0 +1,493 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//go:cgo_export_dynamic runtime.end _end
+//go:cgo_export_dynamic runtime.etext _etext
+//go:cgo_export_dynamic runtime.edata _edata
+
+//go:cgo_import_dynamic libc____errno ___errno "libc.so"
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+//go:cgo_import_dynamic libc_close close "libc.so"
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+//go:cgo_import_dynamic libc_fstat fstat "libc.so"
+//go:cgo_import_dynamic libc_getcontext getcontext "libc.so"
+//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
+//go:cgo_import_dynamic libc_madvise madvise "libc.so"
+//go:cgo_import_dynamic libc_malloc malloc "libc.so"
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+//go:cgo_import_dynamic libc_open open "libc.so"
+//go:cgo_import_dynamic libc_pthread_attr_destroy pthread_attr_destroy "libc.so"
+//go:cgo_import_dynamic libc_pthread_attr_getstack pthread_attr_getstack "libc.so"
+//go:cgo_import_dynamic libc_pthread_attr_init pthread_attr_init "libc.so"
+//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "libc.so"
+//go:cgo_import_dynamic libc_pthread_attr_setstack pthread_attr_setstack "libc.so"
+//go:cgo_import_dynamic libc_pthread_create pthread_create "libc.so"
+//go:cgo_import_dynamic libc_raise raise "libc.so"
+//go:cgo_import_dynamic libc_read read "libc.so"
+//go:cgo_import_dynamic libc_select select "libc.so"
+//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
+//go:cgo_import_dynamic libc_sem_init sem_init "libc.so"
+//go:cgo_import_dynamic libc_sem_post sem_post "libc.so"
+//go:cgo_import_dynamic libc_sem_reltimedwait_np sem_reltimedwait_np "libc.so"
+//go:cgo_import_dynamic libc_sem_wait sem_wait "libc.so"
+//go:cgo_import_dynamic libc_setitimer setitimer "libc.so"
+//go:cgo_import_dynamic libc_sigaction sigaction "libc.so"
+//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.so"
+//go:cgo_import_dynamic libc_sigprocmask sigprocmask "libc.so"
+//go:cgo_import_dynamic libc_sysconf sysconf "libc.so"
+//go:cgo_import_dynamic libc_usleep usleep "libc.so"
+//go:cgo_import_dynamic libc_write write "libc.so"
+
+//go:linkname libc____errno libc____errno
+//go:linkname libc_clock_gettime libc_clock_gettime
+//go:linkname libc_close libc_close
+//go:linkname libc_exit libc_exit
+//go:linkname libc_fstat libc_fstat
+//go:linkname libc_getcontext libc_getcontext
+//go:linkname libc_getrlimit libc_getrlimit
+//go:linkname libc_madvise libc_madvise
+//go:linkname libc_malloc libc_malloc
+//go:linkname libc_mmap libc_mmap
+//go:linkname libc_munmap libc_munmap
+//go:linkname libc_open libc_open
+//go:linkname libc_pthread_attr_destroy libc_pthread_attr_destroy
+//go:linkname libc_pthread_attr_getstack libc_pthread_attr_getstack
+//go:linkname libc_pthread_attr_init libc_pthread_attr_init
+//go:linkname libc_pthread_attr_setdetachstate libc_pthread_attr_setdetachstate
+//go:linkname libc_pthread_attr_setstack libc_pthread_attr_setstack
+//go:linkname libc_pthread_create libc_pthread_create
+//go:linkname libc_raise libc_raise
+//go:linkname libc_read libc_read
+//go:linkname libc_select libc_select
+//go:linkname libc_sched_yield libc_sched_yield
+//go:linkname libc_sem_init libc_sem_init
+//go:linkname libc_sem_post libc_sem_post
+//go:linkname libc_sem_reltimedwait_np libc_sem_reltimedwait_np
+//go:linkname libc_sem_wait libc_sem_wait
+//go:linkname libc_setitimer libc_setitimer
+//go:linkname libc_sigaction libc_sigaction
+//go:linkname libc_sigaltstack libc_sigaltstack
+//go:linkname libc_sigprocmask libc_sigprocmask
+//go:linkname libc_sysconf libc_sysconf
+//go:linkname libc_usleep libc_usleep
+//go:linkname libc_write libc_write
+
+var (
+ libc____errno,
+ libc_clock_gettime,
+ libc_close,
+ libc_exit,
+ libc_fstat,
+ libc_getcontext,
+ libc_getrlimit,
+ libc_madvise,
+ libc_malloc,
+ libc_mmap,
+ libc_munmap,
+ libc_open,
+ libc_pthread_attr_destroy,
+ libc_pthread_attr_getstack,
+ libc_pthread_attr_init,
+ libc_pthread_attr_setdetachstate,
+ libc_pthread_attr_setstack,
+ libc_pthread_create,
+ libc_raise,
+ libc_read,
+ libc_sched_yield,
+ libc_select,
+ libc_sem_init,
+ libc_sem_post,
+ libc_sem_reltimedwait_np,
+ libc_sem_wait,
+ libc_setitimer,
+ libc_sigaction,
+ libc_sigaltstack,
+ libc_sigprocmask,
+ libc_sysconf,
+ libc_usleep,
+ libc_write libcFunc
+)
+
+var sigset_none = sigset{}
+var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
+
+func getncpu() int32 {
+ n := int32(sysconf(__SC_NPROCESSORS_ONLN))
+ if n < 1 {
+ return 1
+ }
+ return n
+}
+
+func osinit() {
+ ncpu = getncpu()
+}
+
+func tstart_sysvicall()
+
+func newosproc(mp *m, _ unsafe.Pointer) {
+ var (
+ attr pthreadattr
+ oset sigset
+ tid pthread
+ ret int32
+ size uint64
+ )
+
+ if pthread_attr_init(&attr) != 0 {
+ gothrow("pthread_attr_init")
+ }
+ if pthread_attr_setstack(&attr, 0, 0x200000) != 0 {
+ gothrow("pthread_attr_setstack")
+ }
+ if pthread_attr_getstack(&attr, unsafe.Pointer(&mp.g0.stack.hi), &size) != 0 {
+ gothrow("pthread_attr_getstack")
+ }
+ mp.g0.stack.lo = mp.g0.stack.hi - uintptr(size)
+ if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
+ gothrow("pthread_attr_setdetachstate")
+ }
+
+ // Disable signals during create, so that the new thread starts
+ // with signals disabled. It will enable them in minit.
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ ret = pthread_create(&tid, &attr, funcPC(tstart_sysvicall), unsafe.Pointer(mp))
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+ if ret != 0 {
+ print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
+ gothrow("newosproc")
+ }
+}
+
+var urandom_data [_HashRandomBytes]byte
+var urandom_dev = []byte("/dev/random\x00")
+
+//go:nosplit
+func get_random_data(rnd *unsafe.Pointer, rnd_len *int32) {
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ if read(fd, unsafe.Pointer(&urandom_data), _HashRandomBytes) == _HashRandomBytes {
+ *rnd = unsafe.Pointer(&urandom_data[0])
+ *rnd_len = _HashRandomBytes
+ } else {
+ *rnd = nil
+ *rnd_len = 0
+ }
+ close(fd)
+}
+
+func goenvs() {
+ goenvs_unix()
+}
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
+func mpreinit(mp *m) {
+ mp.gsignal = malg(32 * 1024)
+ mp.gsignal.m = mp
+}
+
+func miniterrno()
+
+// Called to initialize a new m (including the bootstrap m).
+// Called on the new thread, can not allocate memory.
+func minit() {
+ _g_ := getg()
+ asmcgocall(unsafe.Pointer(funcPC(miniterrno)), unsafe.Pointer(libc____errno))
+ // Initialize signal handling
+ signalstack((*byte)(unsafe.Pointer(_g_.m.gsignal.stack.lo)), 32*1024)
+ sigprocmask(_SIG_SETMASK, &sigset_none, nil)
+}
+
+// Called from dropm to undo the effect of an minit.
+func unminit() {
+ signalstack(nil, 0)
+}
+
+func memlimit() uintptr {
+ /*
+ TODO: Convert to Go when something actually uses the result.
+ Rlimit rl;
+ extern byte runtime·text[], runtime·end[];
+ uintptr used;
+
+ if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
+ return 0;
+ if(rl.rlim_cur >= 0x7fffffff)
+ return 0;
+
+ // Estimate our VM footprint excluding the heap.
+ // Not an exact science: use size of binary plus
+ // some room for thread stacks.
+ used = runtime·end - runtime·text + (64<<20);
+ if(used >= rl.rlim_cur)
+ return 0;
+
+ // If there's not at least 16 MB left, we're probably
+ // not going to be able to do much. Treat as no limit.
+ rl.rlim_cur -= used;
+ if(rl.rlim_cur < (16<<20))
+ return 0;
+
+ return rl.rlim_cur - used;
+ */
+
+ return 0
+}
+
+func sigtramp()
+
+func setsig(i int32, fn uintptr, restart bool) {
+ var sa sigactiont
+
+ sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK
+ if restart {
+ sa.sa_flags |= _SA_RESTART
+ }
+ sa.sa_mask = sigset_all
+ if fn == funcPC(sighandler) {
+ fn = funcPC(sigtramp)
+ }
+ *((*uintptr)(unsafe.Pointer(&sa._funcptr))) = fn
+ sigaction(i, &sa, nil)
+}
+
+func getsig(i int32) uintptr {
+ var sa sigactiont
+ sigaction(i, nil, &sa)
+ if *((*uintptr)(unsafe.Pointer(&sa._funcptr))) == funcPC(sigtramp) {
+ return funcPC(sighandler)
+ }
+ return *((*uintptr)(unsafe.Pointer(&sa._funcptr)))
+}
+
+func signalstack(p *byte, n int32) {
+ var st sigaltstackt
+ st.ss_sp = (*byte)(unsafe.Pointer(p))
+ st.ss_size = uint64(n)
+ st.ss_flags = 0
+ if p == nil {
+ st.ss_flags = _SS_DISABLE
+ }
+ sigaltstack(&st, nil)
+}
+
+func unblocksignals() {
+ sigprocmask(_SIG_SETMASK, &sigset_none, nil)
+}
+
+//go:nosplit
+func semacreate() uintptr {
+ var sem *semt
+ _g_ := getg()
+
+ // Call libc's malloc rather than the runtime's malloc. This will
+ // allocate space on the C heap. We can't call the runtime's malloc
+ // here because it could cause a deadlock.
+ _g_.m.libcall.fn = uintptr(libc_malloc)
+ _g_.m.libcall.n = 1
+ memclr(unsafe.Pointer(&_g_.m.scratch), uintptr(len(_g_.m.scratch.v)))
+ _g_.m.scratch.v[0] = unsafe.Sizeof(*sem)
+ _g_.m.libcall.args = uintptr(unsafe.Pointer(&_g_.m.scratch))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_g_.m.libcall))
+ sem = (*semt)(unsafe.Pointer(_g_.m.libcall.r1))
+ if sem_init(sem, 0, 0) != 0 {
+ gothrow("sem_init")
+ }
+ return uintptr(unsafe.Pointer(sem))
+}
+
+//go:nosplit
+func semasleep(ns int64) int32 {
+ _m_ := getg().m
+ if ns >= 0 {
+ _m_.ts.tv_sec = ns / 1000000000
+ _m_.ts.tv_nsec = ns % 1000000000
+
+ _m_.libcall.fn = uintptr(unsafe.Pointer(libc_sem_reltimedwait_np))
+ _m_.libcall.n = 2
+ memclr(unsafe.Pointer(&_m_.scratch), uintptr(len(_m_.scratch.v)))
+ _m_.scratch.v[0] = _m_.waitsema
+ _m_.scratch.v[1] = uintptr(unsafe.Pointer(&_m_.ts))
+ _m_.libcall.args = uintptr(unsafe.Pointer(&_m_.scratch))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_m_.libcall))
+ if *_m_.perrno != 0 {
+ if *_m_.perrno == _ETIMEDOUT || *_m_.perrno == _EAGAIN || *_m_.perrno == _EINTR {
+ return -1
+ }
+ gothrow("sem_reltimedwait_np")
+ }
+ return 0
+ }
+ for {
+ _m_.libcall.fn = uintptr(unsafe.Pointer(libc_sem_wait))
+ _m_.libcall.n = 1
+ memclr(unsafe.Pointer(&_m_.scratch), uintptr(len(_m_.scratch.v)))
+ _m_.scratch.v[0] = _m_.waitsema
+ _m_.libcall.args = uintptr(unsafe.Pointer(&_m_.scratch))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_m_.libcall))
+ if _m_.libcall.r1 == 0 {
+ break
+ }
+ if *_m_.perrno == _EINTR {
+ continue
+ }
+ gothrow("sem_wait")
+ }
+ return 0
+}
+
+//go:nosplit
+func semawakeup(mp *m) {
+ if sem_post((*semt)(unsafe.Pointer(mp.waitsema))) != 0 {
+ gothrow("sem_post")
+ }
+}
+
+//go:nosplit
+func close(fd int32) int32 {
+ return int32(sysvicall1(libc_close, uintptr(fd)))
+}
+
+//go:nosplit
+func exit(r int32) {
+ sysvicall1(libc_exit, uintptr(r))
+}
+
+//go:nosplit
+func getcontext(context *ucontext) /* int32 */ {
+ sysvicall1(libc_getcontext, uintptr(unsafe.Pointer(context)))
+}
+
+//go:nosplit
+func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
+ sysvicall3(libc_madvise, uintptr(addr), uintptr(n), uintptr(flags))
+}
+
+//go:nosplit
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer {
+ return unsafe.Pointer(sysvicall6(libc_mmap, uintptr(addr), uintptr(n), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(off)))
+}
+
+//go:nosplit
+func munmap(addr unsafe.Pointer, n uintptr) {
+ sysvicall2(libc_munmap, uintptr(addr), uintptr(n))
+}
+
+func nanotime1()
+
+//go:nosplit
+func nanotime() int64 {
+ return int64(sysvicall0(libcFunc(funcPC(nanotime1))))
+}
+
+//go:nosplit
+func open(path *byte, mode, perm int32) int32 {
+ return int32(sysvicall3(libc_open, uintptr(unsafe.Pointer(path)), uintptr(mode), uintptr(perm)))
+}
+
+func pthread_attr_destroy(attr *pthreadattr) int32 {
+ return int32(sysvicall1(libc_pthread_attr_destroy, uintptr(unsafe.Pointer(attr))))
+}
+
+func pthread_attr_getstack(attr *pthreadattr, addr unsafe.Pointer, size *uint64) int32 {
+ return int32(sysvicall3(libc_pthread_attr_getstack, uintptr(unsafe.Pointer(attr)), uintptr(addr), uintptr(unsafe.Pointer(size))))
+}
+
+func pthread_attr_init(attr *pthreadattr) int32 {
+ return int32(sysvicall1(libc_pthread_attr_init, uintptr(unsafe.Pointer(attr))))
+}
+
+func pthread_attr_setdetachstate(attr *pthreadattr, state int32) int32 {
+ return int32(sysvicall2(libc_pthread_attr_setdetachstate, uintptr(unsafe.Pointer(attr)), uintptr(state)))
+}
+
+func pthread_attr_setstack(attr *pthreadattr, addr uintptr, size uint64) int32 {
+ return int32(sysvicall3(libc_pthread_attr_setstack, uintptr(unsafe.Pointer(attr)), uintptr(addr), uintptr(size)))
+}
+
+func pthread_create(thread *pthread, attr *pthreadattr, fn uintptr, arg unsafe.Pointer) int32 {
+ return int32(sysvicall4(libc_pthread_create, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(fn), uintptr(arg)))
+}
+
+func raise(sig int32) /* int32 */ {
+ sysvicall1(libc_raise, uintptr(sig))
+}
+
+//go:nosplit
+func read(fd int32, buf unsafe.Pointer, nbyte int32) int32 {
+ return int32(sysvicall3(libc_read, uintptr(fd), uintptr(buf), uintptr(nbyte)))
+}
+
+//go:nosplit
+func sem_init(sem *semt, pshared int32, value uint32) int32 {
+ return int32(sysvicall3(libc_sem_init, uintptr(unsafe.Pointer(sem)), uintptr(pshared), uintptr(value)))
+}
+
+//go:nosplit
+func sem_post(sem *semt) int32 {
+ return int32(sysvicall1(libc_sem_post, uintptr(unsafe.Pointer(sem))))
+}
+
+//go:nosplit
+func sem_reltimedwait_np(sem *semt, timeout *timespec) int32 {
+ return int32(sysvicall2(libc_sem_reltimedwait_np, uintptr(unsafe.Pointer(sem)), uintptr(unsafe.Pointer(timeout))))
+}
+
+//go:nosplit
+func sem_wait(sem *semt) int32 {
+ return int32(sysvicall1(libc_sem_wait, uintptr(unsafe.Pointer(sem))))
+}
+
+func setitimer(which int32, value *itimerval, ovalue *itimerval) /* int32 */ {
+ sysvicall3(libc_setitimer, uintptr(which), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(ovalue)))
+}
+
+func sigaction(sig int32, act *sigactiont, oact *sigactiont) /* int32 */ {
+ sysvicall3(libc_sigaction, uintptr(sig), uintptr(unsafe.Pointer(act)), uintptr(unsafe.Pointer(oact)))
+}
+
+func sigaltstack(ss *sigaltstackt, oss *sigaltstackt) /* int32 */ {
+ sysvicall2(libc_sigaltstack, uintptr(unsafe.Pointer(ss)), uintptr(unsafe.Pointer(oss)))
+}
+
+func sigprocmask(how int32, set *sigset, oset *sigset) /* int32 */ {
+ sysvicall3(libc_sigprocmask, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oset)))
+}
+
+func sysconf(name int32) int64 {
+ return int64(sysvicall1(libc_sysconf, uintptr(name)))
+}
+
+func usleep1(uint32)
+
+//go:nosplit
+func usleep(µs uint32) {
+ usleep1(µs)
+}
+
+//go:nosplit
+func write(fd uintptr, buf unsafe.Pointer, nbyte int32) int32 {
+ return int32(sysvicall3(libc_write, uintptr(fd), uintptr(buf), uintptr(nbyte)))
+}
+
+func osyield1()
+
+//go:nosplit
+func osyield() {
+ _g_ := getg()
+
+ // Check the validity of m because we might be called on the cgo callback
+ // path early enough that there isn't an m available yet.
+ if _g_ != nil && _g_.m != nil {
+ sysvicall0(libc_sched_yield)
+ return
+ }
+ osyield1()
+}
diff --git a/src/runtime/os_darwin.c b/src/runtime/os_darwin.c
deleted file mode 100644
index b866863d0..000000000
--- a/src/runtime/os_darwin.c
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-#include "stack.h"
-#include "textflag.h"
-
-extern SigTab runtime·sigtab[];
-
-static Sigset sigset_none;
-static Sigset sigset_all = ~(Sigset)0;
-
-static void
-unimplemented(int8 *name)
-{
- runtime·prints(name);
- runtime·prints(" not implemented\n");
- *(int32*)1231 = 1231;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·semawakeup(M *mp)
-{
- runtime·mach_semrelease(mp->waitsema);
-}
-
-static void
-semacreate(void)
-{
- g->m->scalararg[0] = runtime·mach_semcreate();
-}
-
-#pragma textflag NOSPLIT
-uintptr
-runtime·semacreate(void)
-{
- uintptr x;
- void (*fn)(void);
-
- fn = semacreate;
- runtime·onM(&fn);
- x = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
- return x;
-}
-
-// BSD interface for threading.
-void
-runtime·osinit(void)
-{
- // bsdthread_register delayed until end of goenvs so that we
- // can look at the environment first.
-
- // Use sysctl to fetch hw.ncpu.
- uint32 mib[2];
- uint32 out;
- int32 ret;
- uintptr nout;
-
- mib[0] = 6;
- mib[1] = 3;
- nout = sizeof out;
- out = 0;
- ret = runtime·sysctl(mib, 2, (byte*)&out, &nout, nil, 0);
- if(ret >= 0)
- runtime·ncpu = out;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·get_random_data(byte **rnd, int32 *rnd_len)
-{
- #pragma dataflag NOPTR
- static byte urandom_data[HashRandomBytes];
- int32 fd;
- fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
- if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
- *rnd = urandom_data;
- *rnd_len = HashRandomBytes;
- } else {
- *rnd = nil;
- *rnd_len = 0;
- }
- runtime·close(fd);
-}
-
-void
-runtime·goenvs(void)
-{
- runtime·goenvs_unix();
-
- // Register our thread-creation callback (see sys_darwin_{amd64,386}.s)
- // but only if we're not using cgo. If we are using cgo we need
- // to let the C pthread library install its own thread-creation callback.
- if(!runtime·iscgo) {
- if(runtime·bsdthread_register() != 0) {
- if(runtime·getenv("DYLD_INSERT_LIBRARIES"))
- runtime·throw("runtime: bsdthread_register error (unset DYLD_INSERT_LIBRARIES)");
- runtime·throw("runtime: bsdthread_register error");
- }
- }
-
-}
-
-void
-runtime·newosproc(M *mp, void *stk)
-{
- int32 errno;
- Sigset oset;
-
- mp->tls[0] = mp->id; // so 386 asm can find it
- if(0){
- runtime·printf("newosproc stk=%p m=%p g=%p id=%d/%d ostk=%p\n",
- stk, mp, mp->g0, mp->id, (int32)mp->tls[0], &mp);
- }
-
- runtime·sigprocmask(SIG_SETMASK, &sigset_all, &oset);
- errno = runtime·bsdthread_create(stk, mp, mp->g0, runtime·mstart);
- runtime·sigprocmask(SIG_SETMASK, &oset, nil);
-
- if(errno < 0) {
- runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -errno);
- runtime·throw("runtime.newosproc");
- }
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime·mpreinit(M *mp)
-{
- mp->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
- runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
-
- mp->gsignal->m = mp;
- runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime·minit(void)
-{
- // Initialize signal handling.
- runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
-
- runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime·unminit(void)
-{
- runtime·signalstack(nil, 0);
-}
-
-// Mach IPC, to get at semaphores
-// Definitions are in /usr/include/mach on a Mac.
-
-static void
-macherror(int32 r, int8 *fn)
-{
- runtime·prints("mach error ");
- runtime·prints(fn);
- runtime·prints(": ");
- runtime·printint(r);
- runtime·prints("\n");
- runtime·throw("mach error");
-}
-
-enum
-{
- DebugMach = 0
-};
-
-static MachNDR zerondr;
-
-#define MACH_MSGH_BITS(a, b) ((a) | ((b)<<8))
-
-static int32
-mach_msg(MachHeader *h,
- int32 op,
- uint32 send_size,
- uint32 rcv_size,
- uint32 rcv_name,
- uint32 timeout,
- uint32 notify)
-{
- // TODO: Loop on interrupt.
- return runtime·mach_msg_trap(h, op, send_size, rcv_size, rcv_name, timeout, notify);
-}
-
-// Mach RPC (MIG)
-
-enum
-{
- MinMachMsg = 48,
- Reply = 100,
-};
-
-#pragma pack on
-typedef struct CodeMsg CodeMsg;
-struct CodeMsg
-{
- MachHeader h;
- MachNDR NDR;
- int32 code;
-};
-#pragma pack off
-
-static int32
-machcall(MachHeader *h, int32 maxsize, int32 rxsize)
-{
- uint32 *p;
- int32 i, ret, id;
- uint32 port;
- CodeMsg *c;
-
- if((port = g->m->machport) == 0){
- port = runtime·mach_reply_port();
- g->m->machport = port;
- }
-
- h->msgh_bits |= MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
- h->msgh_local_port = port;
- h->msgh_reserved = 0;
- id = h->msgh_id;
-
- if(DebugMach){
- p = (uint32*)h;
- runtime·prints("send:\t");
- for(i=0; i<h->msgh_size/sizeof(p[0]); i++){
- runtime·prints(" ");
- runtime·printpointer((void*)p[i]);
- if(i%8 == 7)
- runtime·prints("\n\t");
- }
- if(i%8)
- runtime·prints("\n");
- }
-
- ret = mach_msg(h, MACH_SEND_MSG|MACH_RCV_MSG,
- h->msgh_size, maxsize, port, 0, 0);
- if(ret != 0){
- if(DebugMach){
- runtime·prints("mach_msg error ");
- runtime·printint(ret);
- runtime·prints("\n");
- }
- return ret;
- }
-
- if(DebugMach){
- p = (uint32*)h;
- runtime·prints("recv:\t");
- for(i=0; i<h->msgh_size/sizeof(p[0]); i++){
- runtime·prints(" ");
- runtime·printpointer((void*)p[i]);
- if(i%8 == 7)
- runtime·prints("\n\t");
- }
- if(i%8)
- runtime·prints("\n");
- }
-
- if(h->msgh_id != id+Reply){
- if(DebugMach){
- runtime·prints("mach_msg reply id mismatch ");
- runtime·printint(h->msgh_id);
- runtime·prints(" != ");
- runtime·printint(id+Reply);
- runtime·prints("\n");
- }
- return -303; // MIG_REPLY_MISMATCH
- }
-
- // Look for a response giving the return value.
- // Any call can send this back with an error,
- // and some calls only have return values so they
- // send it back on success too. I don't quite see how
- // you know it's one of these and not the full response
- // format, so just look if the message is right.
- c = (CodeMsg*)h;
- if(h->msgh_size == sizeof(CodeMsg)
- && !(h->msgh_bits & MACH_MSGH_BITS_COMPLEX)){
- if(DebugMach){
- runtime·prints("mig result ");
- runtime·printint(c->code);
- runtime·prints("\n");
- }
- return c->code;
- }
-
- if(h->msgh_size != rxsize){
- if(DebugMach){
- runtime·prints("mach_msg reply size mismatch ");
- runtime·printint(h->msgh_size);
- runtime·prints(" != ");
- runtime·printint(rxsize);
- runtime·prints("\n");
- }
- return -307; // MIG_ARRAY_TOO_LARGE
- }
-
- return 0;
-}
-
-
-// Semaphores!
-
-enum
-{
- Tmach_semcreate = 3418,
- Rmach_semcreate = Tmach_semcreate + Reply,
-
- Tmach_semdestroy = 3419,
- Rmach_semdestroy = Tmach_semdestroy + Reply,
-
- // Mach calls that get interrupted by Unix signals
- // return this error code. We retry them.
- KERN_ABORTED = 14,
- KERN_OPERATION_TIMED_OUT = 49,
-};
-
-typedef struct Tmach_semcreateMsg Tmach_semcreateMsg;
-typedef struct Rmach_semcreateMsg Rmach_semcreateMsg;
-typedef struct Tmach_semdestroyMsg Tmach_semdestroyMsg;
-// Rmach_semdestroyMsg = CodeMsg
-
-#pragma pack on
-struct Tmach_semcreateMsg
-{
- MachHeader h;
- MachNDR ndr;
- int32 policy;
- int32 value;
-};
-
-struct Rmach_semcreateMsg
-{
- MachHeader h;
- MachBody body;
- MachPort semaphore;
-};
-
-struct Tmach_semdestroyMsg
-{
- MachHeader h;
- MachBody body;
- MachPort semaphore;
-};
-#pragma pack off
-
-uint32
-runtime·mach_semcreate(void)
-{
- union {
- Tmach_semcreateMsg tx;
- Rmach_semcreateMsg rx;
- uint8 pad[MinMachMsg];
- } m;
- int32 r;
-
- m.tx.h.msgh_bits = 0;
- m.tx.h.msgh_size = sizeof(m.tx);
- m.tx.h.msgh_remote_port = runtime·mach_task_self();
- m.tx.h.msgh_id = Tmach_semcreate;
- m.tx.ndr = zerondr;
-
- m.tx.policy = 0; // 0 = SYNC_POLICY_FIFO
- m.tx.value = 0;
-
- while((r = machcall(&m.tx.h, sizeof m, sizeof(m.rx))) != 0){
- if(r == KERN_ABORTED) // interrupted
- continue;
- macherror(r, "semaphore_create");
- }
- if(m.rx.body.msgh_descriptor_count != 1)
- unimplemented("mach_semcreate desc count");
- return m.rx.semaphore.name;
-}
-
-void
-runtime·mach_semdestroy(uint32 sem)
-{
- union {
- Tmach_semdestroyMsg tx;
- uint8 pad[MinMachMsg];
- } m;
- int32 r;
-
- m.tx.h.msgh_bits = MACH_MSGH_BITS_COMPLEX;
- m.tx.h.msgh_size = sizeof(m.tx);
- m.tx.h.msgh_remote_port = runtime·mach_task_self();
- m.tx.h.msgh_id = Tmach_semdestroy;
- m.tx.body.msgh_descriptor_count = 1;
- m.tx.semaphore.name = sem;
- m.tx.semaphore.disposition = MACH_MSG_TYPE_MOVE_SEND;
- m.tx.semaphore.type = 0;
-
- while((r = machcall(&m.tx.h, sizeof m, 0)) != 0){
- if(r == KERN_ABORTED) // interrupted
- continue;
- macherror(r, "semaphore_destroy");
- }
-}
-
-// The other calls have simple system call traps in sys_darwin_{amd64,386}.s
-int32 runtime·mach_semaphore_wait(uint32 sema);
-int32 runtime·mach_semaphore_timedwait(uint32 sema, uint32 sec, uint32 nsec);
-int32 runtime·mach_semaphore_signal(uint32 sema);
-int32 runtime·mach_semaphore_signal_all(uint32 sema);
-
-static void
-semasleep(void)
-{
- int32 r, secs, nsecs;
- int64 ns;
-
- ns = (int64)(uint32)g->m->scalararg[0] | (int64)(uint32)g->m->scalararg[1]<<32;
- g->m->scalararg[0] = 0;
- g->m->scalararg[1] = 0;
-
- if(ns >= 0) {
- secs = runtime·timediv(ns, 1000000000, &nsecs);
- r = runtime·mach_semaphore_timedwait(g->m->waitsema, secs, nsecs);
- if(r == KERN_ABORTED || r == KERN_OPERATION_TIMED_OUT) {
- g->m->scalararg[0] = -1;
- return;
- }
- if(r != 0)
- macherror(r, "semaphore_wait");
- g->m->scalararg[0] = 0;
- return;
- }
- while((r = runtime·mach_semaphore_wait(g->m->waitsema)) != 0) {
- if(r == KERN_ABORTED) // interrupted
- continue;
- macherror(r, "semaphore_wait");
- }
- g->m->scalararg[0] = 0;
- return;
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·semasleep(int64 ns)
-{
- int32 r;
- void (*fn)(void);
-
- g->m->scalararg[0] = (uint32)ns;
- g->m->scalararg[1] = (uint32)(ns>>32);
- fn = semasleep;
- runtime·onM(&fn);
- r = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
- return r;
-}
-
-static int32 mach_semrelease_errno;
-
-static void
-mach_semrelease_fail(void)
-{
- macherror(mach_semrelease_errno, "semaphore_signal");
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·mach_semrelease(uint32 sem)
-{
- int32 r;
- void (*fn)(void);
-
- while((r = runtime·mach_semaphore_signal(sem)) != 0) {
- if(r == KERN_ABORTED) // interrupted
- continue;
-
- // mach_semrelease must be completely nosplit,
- // because it is called from Go code.
- // If we're going to die, start that process on the m stack
- // to avoid a Go stack split.
- // Only do that if we're actually running on the g stack.
- // We might be on the gsignal stack, and if so, onM will abort.
- // We use the global variable instead of scalararg because
- // we might be on the gsignal stack, having interrupted a
- // normal call to onM. It doesn't quite matter, since the
- // program is about to die, but better to be clean.
- mach_semrelease_errno = r;
- fn = mach_semrelease_fail;
- if(g == g->m->curg)
- runtime·onM(&fn);
- else
- fn();
- }
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·osyield(void)
-{
- runtime·usleep(1);
-}
-
-uintptr
-runtime·memlimit(void)
-{
- // NOTE(rsc): Could use getrlimit here,
- // like on FreeBSD or Linux, but Darwin doesn't enforce
- // ulimit -v, so it's unclear why we'd try to stay within
- // the limit.
- return 0;
-}
-
-void
-runtime·setsig(int32 i, GoSighandler *fn, bool restart)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
- if(restart)
- sa.sa_flags |= SA_RESTART;
- sa.sa_mask = ~(uintptr)0;
- sa.sa_tramp = (void*)runtime·sigtramp; // runtime·sigtramp's job is to call into real handler
- *(uintptr*)sa.__sigaction_u = (uintptr)fn;
- runtime·sigaction(i, &sa, nil);
-}
-
-GoSighandler*
-runtime·getsig(int32 i)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- runtime·sigaction(i, nil, &sa);
- return *(void**)sa.__sigaction_u;
-}
-
-void
-runtime·signalstack(byte *p, int32 n)
-{
- StackT st;
-
- st.ss_sp = (void*)p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- runtime·sigaltstack(&st, nil);
-}
-
-void
-runtime·unblocksignals(void)
-{
- runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
-}
-
-#pragma textflag NOSPLIT
-int8*
-runtime·signame(int32 sig)
-{
- return runtime·sigtab[sig].name;
-}
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 4327ced91..d8296e056 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -6,19 +6,31 @@ package runtime
import "unsafe"
-func bsdthread_create(stk, mm, gg, fn unsafe.Pointer) int32
+func bsdthread_create(stk unsafe.Pointer, mm *m, gg *g, fn uintptr) int32
func bsdthread_register() int32
+
+//go:noescape
func mach_msg_trap(h unsafe.Pointer, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32
+
func mach_reply_port() uint32
func mach_task_self() uint32
func mach_thread_self() uint32
+
+//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func sigprocmask(sig int32, new, old unsafe.Pointer)
-func sigaction(mode uint32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
+
+//go:noescape
+func sigprocmask(sig uint32, new, old *uint32)
+
+//go:noescape
+func sigaction(mode uint32, new, old *sigactiont)
+
+//go:noescape
+func sigaltstack(new, old *stackt)
+
func sigtramp()
-func setitimer(mode int32, new, old unsafe.Pointer)
-func mach_semaphore_wait(sema uint32) int32
-func mach_semaphore_timedwait(sema, sec, nsec uint32) int32
-func mach_semaphore_signal(sema uint32) int32
-func mach_semaphore_signal_all(sema uint32) int32
+
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
+
+func raise(int32)
diff --git a/src/runtime/os_darwin.h b/src/runtime/os_darwin.h
deleted file mode 100644
index e8bb45daf..000000000
--- a/src/runtime/os_darwin.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-typedef byte* kevent_udata;
-
-int32 runtime·bsdthread_create(void*, M*, G*, void(*)(void));
-int32 runtime·bsdthread_register(void);
-int32 runtime·mach_msg_trap(MachHeader*, int32, uint32, uint32, uint32, uint32, uint32);
-uint32 runtime·mach_reply_port(void);
-int32 runtime·mach_semacquire(uint32, int64);
-uint32 runtime·mach_semcreate(void);
-void runtime·mach_semdestroy(uint32);
-void runtime·mach_semrelease(uint32);
-void runtime·mach_semreset(uint32);
-uint32 runtime·mach_task_self(void);
-uint32 runtime·mach_task_self(void);
-uint32 runtime·mach_thread_self(void);
-uint32 runtime·mach_thread_self(void);
-int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
-
-typedef uint32 Sigset;
-void runtime·sigprocmask(int32, Sigset*, Sigset*);
-void runtime·unblocksignals(void);
-
-struct SigactionT;
-void runtime·sigaction(uintptr, struct SigactionT*, struct SigactionT*);
-
-struct StackT;
-void runtime·sigaltstack(struct StackT*, struct StackT*);
-void runtime·sigtramp(void);
-void runtime·sigpanic(void);
-void runtime·setitimer(int32, Itimerval*, Itimerval*);
-
-
-enum {
- NSIG = 32,
- SI_USER = 0, /* empirically true, but not what headers say */
- SIG_BLOCK = 1,
- SIG_UNBLOCK = 2,
- SIG_SETMASK = 3,
- SS_DISABLE = 4,
-};
diff --git a/src/runtime/os_dragonfly.c b/src/runtime/os_dragonfly.c
deleted file mode 100644
index 051192ad3..000000000
--- a/src/runtime/os_dragonfly.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-#include "stack.h"
-#include "textflag.h"
-
-extern SigTab runtime·sigtab[];
-extern int32 runtime·sys_umtx_sleep(uint32*, int32, int32);
-extern int32 runtime·sys_umtx_wakeup(uint32*, int32);
-
-// From DragonFly's <sys/sysctl.h>
-#define CTL_HW 6
-#define HW_NCPU 3
-
-static Sigset sigset_none;
-static Sigset sigset_all = { ~(uint32)0, ~(uint32)0, ~(uint32)0, ~(uint32)0, };
-
-static int32
-getncpu(void)
-{
- uint32 mib[2];
- uint32 out;
- int32 ret;
- uintptr nout;
-
- // Fetch hw.ncpu via sysctl.
- mib[0] = CTL_HW;
- mib[1] = HW_NCPU;
- nout = sizeof out;
- out = 0;
- ret = runtime·sysctl(mib, 2, (byte*)&out, &nout, nil, 0);
- if(ret >= 0)
- return out;
- else
- return 1;
-}
-
-static void futexsleep(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
-{
- void (*fn)(void);
-
- g->m->ptrarg[0] = addr;
- g->m->scalararg[0] = val;
- g->m->ptrarg[1] = &ns;
-
- fn = futexsleep;
- runtime·onM(&fn);
-}
-
-static void
-futexsleep(void)
-{
- uint32 *addr;
- uint32 val;
- int64 ns;
- int32 timeout = 0;
- int32 ret;
-
- addr = g->m->ptrarg[0];
- val = g->m->scalararg[0];
- ns = *(int64*)g->m->ptrarg[1];
- g->m->ptrarg[0] = nil;
- g->m->scalararg[0] = 0;
- g->m->ptrarg[1] = nil;
-
- if(ns >= 0) {
- // The timeout is specified in microseconds - ensure that we
- // do not end up dividing to zero, which would put us to sleep
- // indefinitely...
- timeout = runtime·timediv(ns, 1000, nil);
- if(timeout == 0)
- timeout = 1;
- }
-
- // sys_umtx_sleep will return EWOULDBLOCK (EAGAIN) when the timeout
- // expires or EBUSY if the mutex value does not match.
- ret = runtime·sys_umtx_sleep(addr, val, timeout);
- if(ret >= 0 || ret == -EINTR || ret == -EAGAIN || ret == -EBUSY)
- return;
-
- runtime·prints("umtx_wait addr=");
- runtime·printpointer(addr);
- runtime·prints(" val=");
- runtime·printint(val);
- runtime·prints(" ret=");
- runtime·printint(ret);
- runtime·prints("\n");
- *(int32*)0x1005 = 0x1005;
-}
-
-static void badfutexwakeup(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·futexwakeup(uint32 *addr, uint32 cnt)
-{
- int32 ret;
- void (*fn)(void);
-
- ret = runtime·sys_umtx_wakeup(addr, cnt);
- if(ret >= 0)
- return;
-
- g->m->ptrarg[0] = addr;
- g->m->scalararg[0] = ret;
- fn = badfutexwakeup;
- if(g == g->m->gsignal)
- fn();
- else
- runtime·onM(&fn);
- *(int32*)0x1006 = 0x1006;
-}
-
-static void
-badfutexwakeup(void)
-{
- void *addr;
- int32 ret;
-
- addr = g->m->ptrarg[0];
- ret = g->m->scalararg[0];
- runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
-}
-
-void runtime·lwp_start(void*);
-
-void
-runtime·newosproc(M *mp, void *stk)
-{
- Lwpparams params;
- Sigset oset;
-
- if(0){
- runtime·printf("newosproc stk=%p m=%p g=%p id=%d/%d ostk=%p\n",
- stk, mp, mp->g0, mp->id, (int32)mp->tls[0], &mp);
- }
-
- runtime·sigprocmask(&sigset_all, &oset);
- runtime·memclr((byte*)&params, sizeof params);
-
- params.func = runtime·lwp_start;
- params.arg = (byte*)mp;
- params.stack = (byte*)stk;
- params.tid1 = (int32*)&mp->procid;
- params.tid2 = nil;
-
- mp->tls[0] = mp->id; // so 386 asm can find it
-
- runtime·lwp_create(&params);
- runtime·sigprocmask(&oset, nil);
-}
-
-void
-runtime·osinit(void)
-{
- runtime·ncpu = getncpu();
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·get_random_data(byte **rnd, int32 *rnd_len)
-{
- #pragma dataflag NOPTR
- static byte urandom_data[HashRandomBytes];
- int32 fd;
- fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
- if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
- *rnd = urandom_data;
- *rnd_len = HashRandomBytes;
- } else {
- *rnd = nil;
- *rnd_len = 0;
- }
- runtime·close(fd);
-}
-
-void
-runtime·goenvs(void)
-{
- runtime·goenvs_unix();
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime·mpreinit(M *mp)
-{
- mp->gsignal = runtime·malg(32*1024);
- runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
-
- mp->gsignal->m = mp;
- runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime·minit(void)
-{
- // Initialize signal handling
- runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
- runtime·sigprocmask(&sigset_none, nil);
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime·unminit(void)
-{
- runtime·signalstack(nil, 0);
-}
-
-uintptr
-runtime·memlimit(void)
-{
- Rlimit rl;
- extern byte runtime·text[], runtime·end[];
- uintptr used;
-
- if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
- return 0;
- if(rl.rlim_cur >= 0x7fffffff)
- return 0;
-
- // Estimate our VM footprint excluding the heap.
- // Not an exact science: use size of binary plus
- // some room for thread stacks.
- used = runtime·end - runtime·text + (64<<20);
- if(used >= rl.rlim_cur)
- return 0;
-
- // If there's not at least 16 MB left, we're probably
- // not going to be able to do much. Treat as no limit.
- rl.rlim_cur -= used;
- if(rl.rlim_cur < (16<<20))
- return 0;
-
- return rl.rlim_cur - used;
-}
-
-extern void runtime·sigtramp(void);
-
-typedef struct sigaction {
- union {
- void (*__sa_handler)(int32);
- void (*__sa_sigaction)(int32, Siginfo*, void *);
- } __sigaction_u; /* signal handler */
- int32 sa_flags; /* see signal options below */
- Sigset sa_mask; /* signal mask to apply */
-} SigactionT;
-
-void
-runtime·setsig(int32 i, GoSighandler *fn, bool restart)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
- if(restart)
- sa.sa_flags |= SA_RESTART;
- sa.sa_mask.__bits[0] = ~(uint32)0;
- sa.sa_mask.__bits[1] = ~(uint32)0;
- sa.sa_mask.__bits[2] = ~(uint32)0;
- sa.sa_mask.__bits[3] = ~(uint32)0;
- if(fn == runtime·sighandler)
- fn = (void*)runtime·sigtramp;
- sa.__sigaction_u.__sa_sigaction = (void*)fn;
- runtime·sigaction(i, &sa, nil);
-}
-
-GoSighandler*
-runtime·getsig(int32 i)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- runtime·sigaction(i, nil, &sa);
- if((void*)sa.__sigaction_u.__sa_sigaction == runtime·sigtramp)
- return runtime·sighandler;
- return (void*)sa.__sigaction_u.__sa_sigaction;
-}
-
-void
-runtime·signalstack(byte *p, int32 n)
-{
- StackT st;
-
- st.ss_sp = (void*)p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- runtime·sigaltstack(&st, nil);
-}
-
-void
-runtime·unblocksignals(void)
-{
- runtime·sigprocmask(&sigset_none, nil);
-}
-
-#pragma textflag NOSPLIT
-int8*
-runtime·signame(int32 sig)
-{
- return runtime·sigtab[sig].name;
-}
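
The deleted DragonFly futexsleep above converts the caller's nanosecond deadline into the microsecond timeout that sys_umtx_sleep takes, clamping the result to at least 1 so a sub-microsecond request is not misread as "sleep forever" (a timeout of 0 means wait indefinitely). A minimal standalone sketch of that conversion, illustrative only and not runtime code — the runtime routes the division through timediv to stay within its nosplit stack budget on 32-bit targets:

	package main

	import "fmt"

	// umtxTimeout reduces a nanosecond sleep request to the microsecond
	// timeout expected by a umtx-style sleep call. 0 means "no timeout",
	// so a negative request maps to 0 and any positive request is clamped
	// to at least 1. Overflow of very large deadlines is ignored here.
	func umtxTimeout(ns int64) int32 {
		if ns < 0 {
			return 0
		}
		timeout := int32(ns / 1000)
		if timeout == 0 {
			timeout = 1
		}
		return timeout
	}

	func main() {
		fmt.Println(umtxTimeout(-1), umtxTimeout(500), umtxTimeout(2500000)) // 0 1 2500
	}
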
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index cdaa06986..0e00f874f 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -6,15 +6,35 @@ package runtime
import "unsafe"
-func lwp_create(param unsafe.Pointer) int32
-func sigaltstack(new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigprocmask(new, old unsafe.Pointer)
-func setitimer(mode int32, new, old unsafe.Pointer)
+//go:noescape
+func lwp_create(param *lwpparams) int32
+
+//go:noescape
+func sigaltstack(new, old *sigaltstackt)
+
+//go:noescape
+func sigaction(sig int32, new, old *sigactiont)
+
+//go:noescape
+func sigprocmask(new, old *sigset)
+
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
+
+//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
+
+//go:noescape
func getrlimit(kind int32, limit unsafe.Pointer) int32
+
func raise(sig int32)
-func sys_umtx_sleep(addr unsafe.Pointer, val, timeout int32) int32
-func sys_umtx_wakeup(addr unsafe.Pointer, val int32) int32
+
+//go:noescape
+func sys_umtx_sleep(addr *uint32, val, timeout int32) int32
+
+//go:noescape
+func sys_umtx_wakeup(addr *uint32, val int32) int32
+
+func osyield()
const stackSystem = 0
diff --git a/src/runtime/os_dragonfly.h b/src/runtime/os_dragonfly.h
deleted file mode 100644
index 389736a32..000000000
--- a/src/runtime/os_dragonfly.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-
-typedef byte* kevent_udata;
-
-int32 runtime·lwp_create(Lwpparams*);
-void runtime·sigpanic(void);
-void runtime·sigaltstack(SigaltstackT*, SigaltstackT*);
-struct sigaction;
-void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
-void runtime·sigprocmask(Sigset *, Sigset *);
-void runtime·unblocksignals(void);
-void runtime·setitimer(int32, Itimerval*, Itimerval*);
-int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
-
-enum {
- NSIG = 33,
- SI_USER = 0x10001,
- SS_DISABLE = 4,
- RLIMIT_AS = 10,
-};
-
-typedef struct Rlimit Rlimit;
-struct Rlimit {
- int64 rlim_cur;
- int64 rlim_max;
-};
-int32 runtime·getrlimit(int32, Rlimit*);
diff --git a/src/runtime/os_freebsd.c b/src/runtime/os_freebsd.c
deleted file mode 100644
index 1c126547a..000000000
--- a/src/runtime/os_freebsd.c
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-#include "stack.h"
-#include "textflag.h"
-
-extern SigTab runtime·sigtab[];
-extern int32 runtime·sys_umtx_op(uint32*, int32, uint32, void*, void*);
-
-// From FreeBSD's <sys/sysctl.h>
-#define CTL_HW 6
-#define HW_NCPU 3
-
-static Sigset sigset_none;
-static Sigset sigset_all = { ~(uint32)0, ~(uint32)0, ~(uint32)0, ~(uint32)0, };
-
-static int32
-getncpu(void)
-{
- uint32 mib[2];
- uint32 out;
- int32 ret;
- uintptr nout;
-
- // Fetch hw.ncpu via sysctl.
- mib[0] = CTL_HW;
- mib[1] = HW_NCPU;
- nout = sizeof out;
- out = 0;
- ret = runtime·sysctl(mib, 2, (byte*)&out, &nout, nil, 0);
- if(ret >= 0)
- return out;
- else
- return 1;
-}
-
-// FreeBSD's umtx_op syscall is effectively the same as Linux's futex, and
-// thus the code is largely similar. See linux/thread.c and lock_futex.c for comments.
-
-static void futexsleep(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
-{
- void (*fn)(void);
-
- g->m->ptrarg[0] = addr;
- g->m->scalararg[0] = val;
- g->m->ptrarg[1] = &ns;
-
- fn = futexsleep;
- runtime·onM(&fn);
-}
-
-static void
-futexsleep(void)
-{
- uint32 *addr;
- uint32 val;
- int64 ns;
- int32 ret;
- Timespec ts;
-
- addr = g->m->ptrarg[0];
- val = g->m->scalararg[0];
- ns = *(int64*)g->m->ptrarg[1];
- g->m->ptrarg[0] = nil;
- g->m->scalararg[0] = 0;
- g->m->ptrarg[1] = nil;
-
- if(ns < 0) {
- ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT_UINT_PRIVATE, val, nil, nil);
- if(ret >= 0 || ret == -EINTR)
- return;
- goto fail;
- }
- // NOTE: tv_nsec is int64 on amd64, so this assumes a little-endian system.
- ts.tv_nsec = 0;
- ts.tv_sec = runtime·timediv(ns, 1000000000, (int32*)&ts.tv_nsec);
- ret = runtime·sys_umtx_op(addr, UMTX_OP_WAIT_UINT_PRIVATE, val, nil, &ts);
- if(ret >= 0 || ret == -EINTR)
- return;
-
-fail:
- runtime·prints("umtx_wait addr=");
- runtime·printpointer(addr);
- runtime·prints(" val=");
- runtime·printint(val);
- runtime·prints(" ret=");
- runtime·printint(ret);
- runtime·prints("\n");
- *(int32*)0x1005 = 0x1005;
-}
-
-static void badfutexwakeup(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·futexwakeup(uint32 *addr, uint32 cnt)
-{
- int32 ret;
- void (*fn)(void);
-
- ret = runtime·sys_umtx_op(addr, UMTX_OP_WAKE_PRIVATE, cnt, nil, nil);
- if(ret >= 0)
- return;
-
- g->m->ptrarg[0] = addr;
- g->m->scalararg[0] = ret;
- fn = badfutexwakeup;
- if(g == g->m->gsignal)
- fn();
- else
- runtime·onM(&fn);
- *(int32*)0x1006 = 0x1006;
-}
-
-static void
-badfutexwakeup(void)
-{
- void *addr;
- int32 ret;
-
- addr = g->m->ptrarg[0];
- ret = g->m->scalararg[0];
- runtime·printf("umtx_wake addr=%p ret=%d\n", addr, ret);
-}
-
-void runtime·thr_start(void*);
-
-void
-runtime·newosproc(M *mp, void *stk)
-{
- ThrParam param;
- Sigset oset;
-
- if(0){
- runtime·printf("newosproc stk=%p m=%p g=%p id=%d/%d ostk=%p\n",
- stk, mp, mp->g0, mp->id, (int32)mp->tls[0], &mp);
- }
-
- runtime·sigprocmask(&sigset_all, &oset);
- runtime·memclr((byte*)&param, sizeof param);
-
- param.start_func = runtime·thr_start;
- param.arg = (byte*)mp;
-
- // NOTE(rsc): This code is confused. stackbase is the top of the stack
- // and is equal to stk. However, it's working, so I'm not changing it.
- param.stack_base = (void*)mp->g0->stack.hi;
- param.stack_size = (byte*)stk - (byte*)mp->g0->stack.hi;
-
- param.child_tid = (void*)&mp->procid;
- param.parent_tid = nil;
- param.tls_base = (void*)&mp->tls[0];
- param.tls_size = sizeof mp->tls;
-
- mp->tls[0] = mp->id; // so 386 asm can find it
-
- runtime·thr_new(&param, sizeof param);
- runtime·sigprocmask(&oset, nil);
-}
-
-void
-runtime·osinit(void)
-{
- runtime·ncpu = getncpu();
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·get_random_data(byte **rnd, int32 *rnd_len)
-{
- #pragma dataflag NOPTR
- static byte urandom_data[HashRandomBytes];
- int32 fd;
- fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
- if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
- *rnd = urandom_data;
- *rnd_len = HashRandomBytes;
- } else {
- *rnd = nil;
- *rnd_len = 0;
- }
- runtime·close(fd);
-}
-
-void
-runtime·goenvs(void)
-{
- runtime·goenvs_unix();
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime·mpreinit(M *mp)
-{
- mp->gsignal = runtime·malg(32*1024);
- runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
-
- mp->gsignal->m = mp;
- runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime·minit(void)
-{
- // Initialize signal handling
- runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
- runtime·sigprocmask(&sigset_none, nil);
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime·unminit(void)
-{
- runtime·signalstack(nil, 0);
-}
-
-uintptr
-runtime·memlimit(void)
-{
- Rlimit rl;
- extern byte runtime·text[], runtime·end[];
- uintptr used;
-
- if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
- return 0;
- if(rl.rlim_cur >= 0x7fffffff)
- return 0;
-
- // Estimate our VM footprint excluding the heap.
- // Not an exact science: use size of binary plus
- // some room for thread stacks.
- used = runtime·end - runtime·text + (64<<20);
- if(used >= rl.rlim_cur)
- return 0;
-
- // If there's not at least 16 MB left, we're probably
- // not going to be able to do much. Treat as no limit.
- rl.rlim_cur -= used;
- if(rl.rlim_cur < (16<<20))
- return 0;
-
- return rl.rlim_cur - used;
-}
-
-extern void runtime·sigtramp(void);
-
-typedef struct sigaction {
- union {
- void (*__sa_handler)(int32);
- void (*__sa_sigaction)(int32, Siginfo*, void *);
- } __sigaction_u; /* signal handler */
- int32 sa_flags; /* see signal options below */
- Sigset sa_mask; /* signal mask to apply */
-} SigactionT;
-
-void
-runtime·setsig(int32 i, GoSighandler *fn, bool restart)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
- if(restart)
- sa.sa_flags |= SA_RESTART;
- sa.sa_mask.__bits[0] = ~(uint32)0;
- sa.sa_mask.__bits[1] = ~(uint32)0;
- sa.sa_mask.__bits[2] = ~(uint32)0;
- sa.sa_mask.__bits[3] = ~(uint32)0;
- if(fn == runtime·sighandler)
- fn = (void*)runtime·sigtramp;
- sa.__sigaction_u.__sa_sigaction = (void*)fn;
- runtime·sigaction(i, &sa, nil);
-}
-
-GoSighandler*
-runtime·getsig(int32 i)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- runtime·sigaction(i, nil, &sa);
- if((void*)sa.__sigaction_u.__sa_sigaction == runtime·sigtramp)
- return runtime·sighandler;
- return (void*)sa.__sigaction_u.__sa_sigaction;
-}
-
-void
-runtime·signalstack(byte *p, int32 n)
-{
- StackT st;
-
- st.ss_sp = (void*)p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- runtime·sigaltstack(&st, nil);
-}
-
-void
-runtime·unblocksignals(void)
-{
- runtime·sigprocmask(&sigset_none, nil);
-}
-
-#pragma textflag NOSPLIT
-int8*
-runtime·signame(int32 sig)
-{
- return runtime·sigtab[sig].name;
-}
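
The FreeBSD futexsleep above builds a Timespec for sys_umtx_op by dividing the nanosecond deadline with timediv and writing the int32 remainder into the int64 tv_nsec field, hence the little-endian note in the code. Outside the runtime's nosplit constraints the same split is just a division and a remainder; a minimal sketch, assuming nothing beyond the standard library:

	package main

	import "fmt"

	// toTimespec splits a relative timeout in nanoseconds into the
	// seconds/nanoseconds pair that a Timespec-shaped syscall argument
	// wants. Illustrative only: the deleted C code used timediv to avoid
	// a 64-bit divide inside a nosplit function on 32-bit targets.
	func toTimespec(ns int64) (sec, nsec int64) {
		return ns / 1000000000, ns % 1000000000
	}

	func main() {
		fmt.Println(toTimespec(2500000000)) // 2 500000000
	}
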
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index 59708049c..998fbca0f 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -6,12 +6,29 @@ package runtime
import "unsafe"
-func thr_new(param unsafe.Pointer, size int32)
-func sigaltstack(new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigprocmask(new, old unsafe.Pointer)
-func setitimer(mode int32, new, old unsafe.Pointer)
+//go:noescape
+func thr_new(param *thrparam, size int32)
+
+//go:noescape
+func sigaltstack(new, old *stackt)
+
+//go:noescape
+func sigaction(sig int32, new, old *sigactiont)
+
+//go:noescape
+func sigprocmask(new, old *sigset)
+
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
+
+//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
+
+//go:noescape
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig int32)
-func sys_umtx_op(addr unsafe.Pointer, mode int32, val uint32, ptr2, ts unsafe.Pointer) int32
+
+//go:noescape
+func sys_umtx_op(addr *uint32, mode int32, val uint32, ptr2, ts *timespec) int32
+
+func osyield()
diff --git a/src/runtime/os_freebsd.h b/src/runtime/os_freebsd.h
deleted file mode 100644
index b86bb393c..000000000
--- a/src/runtime/os_freebsd.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-typedef byte* kevent_udata;
-
-int32 runtime·thr_new(ThrParam*, int32);
-void runtime·sigpanic(void);
-void runtime·sigaltstack(SigaltstackT*, SigaltstackT*);
-struct sigaction;
-void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
-void runtime·sigprocmask(Sigset *, Sigset *);
-void runtime·unblocksignals(void);
-void runtime·setitimer(int32, Itimerval*, Itimerval*);
-int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
-
-enum {
- SS_DISABLE = 4,
- NSIG = 33,
- SI_USER = 0x10001,
- RLIMIT_AS = 10,
-};
-
-typedef struct Rlimit Rlimit;
-struct Rlimit {
- int64 rlim_cur;
- int64 rlim_max;
-};
-int32 runtime·getrlimit(int32, Rlimit*);
diff --git a/src/runtime/os_freebsd_arm.c b/src/runtime/os_freebsd_arm.go
index 2f2d7767f..e049cbf9a 100644
--- a/src/runtime/os_freebsd_arm.c
+++ b/src/runtime/os_freebsd_arm.go
@@ -2,23 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "textflag.h"
+package runtime
-void
-runtime·checkgoarm(void)
-{
+func checkgoarm() {
// TODO(minux)
}
-#pragma textflag NOSPLIT
-int64
-runtime·cputicks(void)
-{
+//go:nosplit
+func cputicks() int64 {
// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
// TODO: need more entropy to better seed fastrand1.
- return runtime·nanotime();
+ return nanotime()
}
diff --git a/src/runtime/os_linux.c b/src/runtime/os_linux.c
deleted file mode 100644
index cc23774e3..000000000
--- a/src/runtime/os_linux.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-#include "stack.h"
-#include "textflag.h"
-
-extern SigTab runtime·sigtab[];
-
-static Sigset sigset_none;
-static Sigset sigset_all = { ~(uint32)0, ~(uint32)0 };
-
-// Linux futex.
-//
-// futexsleep(uint32 *addr, uint32 val)
-// futexwakeup(uint32 *addr)
-//
-// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
-// Futexwakeup wakes up threads sleeping on addr.
-// Futexsleep is allowed to wake up spuriously.
-
-enum
-{
- FUTEX_WAIT = 0,
- FUTEX_WAKE = 1,
-};
-
-// Atomically,
-// if(*addr == val) sleep
-// Might be woken up spuriously; that's allowed.
-// Don't sleep longer than ns; ns < 0 means forever.
-#pragma textflag NOSPLIT
-void
-runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
-{
- Timespec ts;
-
- // Some Linux kernels have a bug where futex of
- // FUTEX_WAIT returns an internal error code
- // as an errno. Libpthread ignores the return value
- // here, and so can we: as it says a few lines up,
- // spurious wakeups are allowed.
-
- if(ns < 0) {
- runtime·futex(addr, FUTEX_WAIT, val, nil, nil, 0);
- return;
- }
-
- // It's difficult to live within the no-split stack limits here.
- // On ARM and 386, a 64-bit divide invokes a general software routine
- // that needs more stack than we can afford. So we use timediv instead.
- // But on real 64-bit systems, where words are larger but the stack limit
- // is not, even timediv is too heavy, and we really need to use just an
- // ordinary machine instruction.
- // Sorry for the #ifdef.
- // For what it's worth, the #ifdef eliminated an implicit little-endian assumption.
-#ifdef _64BIT
- ts.tv_sec = ns / 1000000000LL;
- ts.tv_nsec = ns % 1000000000LL;
-#else
- ts.tv_nsec = 0;
- ts.tv_sec = runtime·timediv(ns, 1000000000LL, (int32*)&ts.tv_nsec);
-#endif
- runtime·futex(addr, FUTEX_WAIT, val, &ts, nil, 0);
-}
-
-static void badfutexwakeup(void);
-
-// If any procs are sleeping on addr, wake up at most cnt.
-#pragma textflag NOSPLIT
-void
-runtime·futexwakeup(uint32 *addr, uint32 cnt)
-{
- int64 ret;
- void (*fn)(void);
-
- ret = runtime·futex(addr, FUTEX_WAKE, cnt, nil, nil, 0);
- if(ret >= 0)
- return;
-
- // I don't know that futex wakeup can return
- // EAGAIN or EINTR, but if it does, it would be
- // safe to loop and call futex again.
- g->m->ptrarg[0] = addr;
- g->m->scalararg[0] = (int32)ret; // truncated but fine
- fn = badfutexwakeup;
- if(g == g->m->gsignal)
- fn();
- else
- runtime·onM(&fn);
- *(int32*)0x1006 = 0x1006;
-}
-
-static void
-badfutexwakeup(void)
-{
- void *addr;
- int64 ret;
-
- addr = g->m->ptrarg[0];
- ret = (int32)g->m->scalararg[0];
- runtime·printf("futexwakeup addr=%p returned %D\n", addr, ret);
-}
-
-extern runtime·sched_getaffinity(uintptr pid, uintptr len, uintptr *buf);
-static int32
-getproccount(void)
-{
- uintptr buf[16], t;
- int32 r, n, i;
-
- r = runtime·sched_getaffinity(0, sizeof(buf), buf);
- if(r <= 0)
- return 1;
- n = 0;
- for(i = 0; i < r/sizeof(buf[0]); i++) {
- t = buf[i];
- while(t != 0) {
- n += t&1;
- t >>= 1;
- }
- }
- if(n < 1)
- n = 1;
- return n;
-}
-
-// Clone, the Linux rfork.
-enum
-{
- CLONE_VM = 0x100,
- CLONE_FS = 0x200,
- CLONE_FILES = 0x400,
- CLONE_SIGHAND = 0x800,
- CLONE_PTRACE = 0x2000,
- CLONE_VFORK = 0x4000,
- CLONE_PARENT = 0x8000,
- CLONE_THREAD = 0x10000,
- CLONE_NEWNS = 0x20000,
- CLONE_SYSVSEM = 0x40000,
- CLONE_SETTLS = 0x80000,
- CLONE_PARENT_SETTID = 0x100000,
- CLONE_CHILD_CLEARTID = 0x200000,
- CLONE_UNTRACED = 0x800000,
- CLONE_CHILD_SETTID = 0x1000000,
- CLONE_STOPPED = 0x2000000,
- CLONE_NEWUTS = 0x4000000,
- CLONE_NEWIPC = 0x8000000,
-};
-
-void
-runtime·newosproc(M *mp, void *stk)
-{
- int32 ret;
- int32 flags;
- Sigset oset;
-
- /*
- * note: strace gets confused if we use CLONE_PTRACE here.
- */
- flags = CLONE_VM /* share memory */
- | CLONE_FS /* share cwd, etc */
- | CLONE_FILES /* share fd table */
- | CLONE_SIGHAND /* share sig handler table */
- | CLONE_THREAD /* revisit - okay for now */
- ;
-
- mp->tls[0] = mp->id; // so 386 asm can find it
- if(0){
- runtime·printf("newosproc stk=%p m=%p g=%p clone=%p id=%d/%d ostk=%p\n",
- stk, mp, mp->g0, runtime·clone, mp->id, (int32)mp->tls[0], &mp);
- }
-
- // Disable signals during clone, so that the new thread starts
- // with signals disabled. It will enable them in minit.
- runtime·rtsigprocmask(SIG_SETMASK, &sigset_all, &oset, sizeof oset);
- ret = runtime·clone(flags, stk, mp, mp->g0, runtime·mstart);
- runtime·rtsigprocmask(SIG_SETMASK, &oset, nil, sizeof oset);
-
- if(ret < 0) {
- runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), -ret);
- runtime·throw("runtime.newosproc");
- }
-}
-
-void
-runtime·osinit(void)
-{
- runtime·ncpu = getproccount();
-}
-
-// Random bytes initialized at startup. These come
-// from the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.c).
-byte* runtime·startup_random_data;
-uint32 runtime·startup_random_data_len;
-
-#pragma textflag NOSPLIT
-void
-runtime·get_random_data(byte **rnd, int32 *rnd_len)
-{
- if(runtime·startup_random_data != nil) {
- *rnd = runtime·startup_random_data;
- *rnd_len = runtime·startup_random_data_len;
- } else {
- #pragma dataflag NOPTR
- static byte urandom_data[HashRandomBytes];
- int32 fd;
- fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
- if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
- *rnd = urandom_data;
- *rnd_len = HashRandomBytes;
- } else {
- *rnd = nil;
- *rnd_len = 0;
- }
- runtime·close(fd);
- }
-}
-
-void
-runtime·goenvs(void)
-{
- runtime·goenvs_unix();
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime·mpreinit(M *mp)
-{
- mp->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
- runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
-
- mp->gsignal->m = mp;
- runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime·minit(void)
-{
- // Initialize signal handling.
- runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
- runtime·rtsigprocmask(SIG_SETMASK, &sigset_none, nil, sizeof(Sigset));
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime·unminit(void)
-{
- runtime·signalstack(nil, 0);
-}
-
-uintptr
-runtime·memlimit(void)
-{
- Rlimit rl;
- extern byte runtime·text[], runtime·end[];
- uintptr used;
-
- if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
- return 0;
- if(rl.rlim_cur >= 0x7fffffff)
- return 0;
-
- // Estimate our VM footprint excluding the heap.
- // Not an exact science: use size of binary plus
- // some room for thread stacks.
- used = runtime·end - runtime·text + (64<<20);
- if(used >= rl.rlim_cur)
- return 0;
-
- // If there's not at least 16 MB left, we're probably
- // not going to be able to do much. Treat as no limit.
- rl.rlim_cur -= used;
- if(rl.rlim_cur < (16<<20))
- return 0;
-
- return rl.rlim_cur - used;
-}
-
-#ifdef GOARCH_386
-#define sa_handler k_sa_handler
-#endif
-
-/*
- * This assembler routine takes the args from registers, puts them on the stack,
- * and calls sighandler().
- */
-extern void runtime·sigtramp(void);
-extern void runtime·sigreturn(void); // calls rt_sigreturn, only used with SA_RESTORER
-
-void
-runtime·setsig(int32 i, GoSighandler *fn, bool restart)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- sa.sa_flags = SA_ONSTACK | SA_SIGINFO | SA_RESTORER;
- if(restart)
- sa.sa_flags |= SA_RESTART;
- sa.sa_mask = ~0ULL;
- // Although Linux manpage says "sa_restorer element is obsolete and
- // should not be used". x86_64 kernel requires it. Only use it on
- // x86.
-#ifdef GOARCH_386
- sa.sa_restorer = (void*)runtime·sigreturn;
-#endif
-#ifdef GOARCH_amd64
- sa.sa_restorer = (void*)runtime·sigreturn;
-#endif
- if(fn == runtime·sighandler)
- fn = (void*)runtime·sigtramp;
- sa.sa_handler = fn;
- // Qemu rejects rt_sigaction of SIGRTMAX (64).
- if(runtime·rt_sigaction(i, &sa, nil, sizeof(sa.sa_mask)) != 0 && i != 64)
- runtime·throw("rt_sigaction failure");
-}
-
-GoSighandler*
-runtime·getsig(int32 i)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- if(runtime·rt_sigaction(i, nil, &sa, sizeof(sa.sa_mask)) != 0)
- runtime·throw("rt_sigaction read failure");
- if((void*)sa.sa_handler == runtime·sigtramp)
- return runtime·sighandler;
- return (void*)sa.sa_handler;
-}
-
-void
-runtime·signalstack(byte *p, int32 n)
-{
- SigaltstackT st;
-
- st.ss_sp = p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- runtime·sigaltstack(&st, nil);
-}
-
-void
-runtime·unblocksignals(void)
-{
- runtime·rtsigprocmask(SIG_SETMASK, &sigset_none, nil, sizeof sigset_none);
-}
-
-#pragma textflag NOSPLIT
-int8*
-runtime·signame(int32 sig)
-{
- return runtime·sigtab[sig].name;
-}
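
getproccount above asks the kernel for the scheduling affinity mask and counts the set bits, one per usable CPU, falling back to 1 if the call fails. A small standalone sketch of that counting step (illustrative only; it uses math/bits in place of the hand-rolled shift loop in the deleted C):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// countCPUs counts the set bits in an affinity bitmask of the kind
	// filled in by sched_getaffinity, with a floor of 1. Sketch only,
	// not the runtime's implementation.
	func countCPUs(mask []uint64) int {
		n := 0
		for _, w := range mask {
			n += bits.OnesCount64(w)
		}
		if n < 1 {
			n = 1
		}
		return n
	}

	func main() {
		fmt.Println(countCPUs([]uint64{0x0f, 0x1})) // 5
	}
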
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 41123ad57..113219aab 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -6,12 +6,28 @@ package runtime
import "unsafe"
+//go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
+
+//go:noescape
func clone(flags int32, stk, mm, gg, fn unsafe.Pointer) int32
-func rt_sigaction(sig uintptr, new, old unsafe.Pointer, size uintptr) int32
-func sigaltstack(new, old unsafe.Pointer)
-func setitimer(mode int32, new, old unsafe.Pointer)
-func rtsigprocmask(sig int32, new, old unsafe.Pointer, size int32)
+
+//go:noescape
+func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
+
+//go:noescape
+func sigaltstack(new, old *sigaltstackt)
+
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
+
+//go:noescape
+func rtsigprocmask(sig uint32, new, old *sigset, size int32)
+
+//go:noescape
func getrlimit(kind int32, limit unsafe.Pointer) int32
-func raise(sig int32)
+func raise(sig uint32)
+
+//go:noescape
func sched_getaffinity(pid, len uintptr, buf *uintptr) int32
+func osyield()
diff --git a/src/runtime/os_linux.h b/src/runtime/os_linux.h
deleted file mode 100644
index 75606d615..000000000
--- a/src/runtime/os_linux.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-
-// Linux-specific system calls
-int32 runtime·futex(uint32*, int32, uint32, Timespec*, uint32*, uint32);
-int32 runtime·clone(int32, void*, M*, G*, void(*)(void));
-
-struct SigactionT;
-int32 runtime·rt_sigaction(uintptr, struct SigactionT*, void*, uintptr);
-
-void runtime·sigaltstack(SigaltstackT*, SigaltstackT*);
-void runtime·sigpanic(void);
-void runtime·setitimer(int32, Itimerval*, Itimerval*);
-
-enum {
- SS_DISABLE = 2,
- NSIG = 65,
- SI_USER = 0,
- SIG_SETMASK = 2,
- RLIMIT_AS = 9,
-};
-
-// It's hard to tease out exactly how big a Sigset is, but
-// rt_sigprocmask crashes if we get it wrong, so if binaries
-// are running, this is right.
-typedef struct Sigset Sigset;
-struct Sigset
-{
- uint32 mask[2];
-};
-void runtime·rtsigprocmask(int32, Sigset*, Sigset*, int32);
-void runtime·unblocksignals(void);
-
-typedef struct Rlimit Rlimit;
-struct Rlimit {
- uintptr rlim_cur;
- uintptr rlim_max;
-};
-int32 runtime·getrlimit(int32, Rlimit*);
diff --git a/src/runtime/os_linux_386.c b/src/runtime/os_linux_386.c
deleted file mode 100644
index dc89d04e2..000000000
--- a/src/runtime/os_linux_386.c
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "textflag.h"
-
-#define AT_NULL 0
-#define AT_RANDOM 25
-#define AT_SYSINFO 32
-extern uint32 runtime·_vdso;
-
-#pragma textflag NOSPLIT
-void
-runtime·linux_setup_vdso(int32 argc, byte **argv)
-{
- byte **envp;
- uint32 *auxv;
-
- // skip envp to get to ELF auxiliary vector.
- for(envp = &argv[argc+1]; *envp != nil; envp++)
- ;
- envp++;
-
- for(auxv=(uint32*)envp; auxv[0] != AT_NULL; auxv += 2) {
- if(auxv[0] == AT_SYSINFO) {
- runtime·_vdso = auxv[1];
- continue;
- }
- if(auxv[0] == AT_RANDOM) {
- runtime·startup_random_data = (byte*)auxv[1];
- runtime·startup_random_data_len = 16;
- continue;
- }
- }
-}
diff --git a/src/runtime/os_linux_386.go b/src/runtime/os_linux_386.go
new file mode 100644
index 000000000..adcd5a1c4
--- /dev/null
+++ b/src/runtime/os_linux_386.go
@@ -0,0 +1,36 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+const (
+ _AT_NULL = 0
+ _AT_RANDOM = 25
+ _AT_SYSINFO = 32
+)
+
+var _vdso uint32
+
+func sysargs(argc int32, argv **byte) {
+ // skip over argv, envv to get to auxv
+ n := argc + 1
+ for argv_index(argv, n) != nil {
+ n++
+ }
+ n++
+ auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+
+ for i := 0; auxv[i] != _AT_NULL; i += 2 {
+ switch auxv[i] {
+ case _AT_SYSINFO:
+ _vdso = auxv[i+1]
+
+ case _AT_RANDOM:
+ startup_random_data = (*byte)(unsafe.Pointer(uintptr(auxv[i+1])))
+ startup_random_data_len = 16
+ }
+ }
+}
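
sysargs above locates the ELF auxiliary vector by walking past argv and the environment on the startup stack, then scans (type, value) pairs until AT_NULL, recording AT_SYSINFO and AT_RANDOM. The scan itself is easy to show in isolation; a hedged sketch over a synthetic auxv slice (the real code reads the pairs straight off the stack via unsafe):

	package main

	import "fmt"

	const (
		_AT_NULL   = 0
		_AT_RANDOM = 25
	)

	// scanAuxv walks a flat array of (type, value) pairs terminated by
	// AT_NULL and returns the AT_RANDOM value if present. Illustrative
	// only; the slice stands in for the vector the kernel places above
	// envp.
	func scanAuxv(auxv []uintptr) (random uintptr, ok bool) {
		for i := 0; i+1 < len(auxv) && auxv[i] != _AT_NULL; i += 2 {
			if auxv[i] == _AT_RANDOM {
				return auxv[i+1], true
			}
		}
		return 0, false
	}

	func main() {
		fmt.Println(scanAuxv([]uintptr{25, 0xdeadbeef, 0, 0}))
	}
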
diff --git a/src/runtime/os_linux_arm.c b/src/runtime/os_linux_arm.c
deleted file mode 100644
index e3eda7c2d..000000000
--- a/src/runtime/os_linux_arm.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "textflag.h"
-
-#define AT_NULL 0
-#define AT_PLATFORM 15 // introduced in at least 2.6.11
-#define AT_HWCAP 16 // introduced in at least 2.6.11
-#define AT_RANDOM 25 // introduced in 2.6.29
-#define HWCAP_VFP (1 << 6) // introduced in at least 2.6.11
-#define HWCAP_VFPv3 (1 << 13) // introduced in 2.6.30
-static uint32 runtime·randomNumber;
-uint8 runtime·armArch = 6; // we default to ARMv6
-uint32 runtime·hwcap; // set by setup_auxv
-extern uint8 runtime·goarm; // set by 5l
-
-void
-runtime·checkgoarm(void)
-{
- if(runtime·goarm > 5 && !(runtime·hwcap & HWCAP_VFP)) {
- runtime·printf("runtime: this CPU has no floating point hardware, so it cannot run\n");
- runtime·printf("this GOARM=%d binary. Recompile using GOARM=5.\n", runtime·goarm);
- runtime·exit(1);
- }
- if(runtime·goarm > 6 && !(runtime·hwcap & HWCAP_VFPv3)) {
- runtime·printf("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n");
- runtime·printf("this GOARM=%d binary. Recompile using GOARM=6.\n", runtime·goarm);
- runtime·exit(1);
- }
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·setup_auxv(int32 argc, byte **argv)
-{
- byte **envp;
- byte *rnd;
- uint32 *auxv;
- uint32 t;
-
- // skip envp to get to ELF auxiliary vector.
- for(envp = &argv[argc+1]; *envp != nil; envp++)
- ;
- envp++;
-
- for(auxv=(uint32*)envp; auxv[0] != AT_NULL; auxv += 2) {
- switch(auxv[0]) {
- case AT_RANDOM: // kernel provided 16-byte worth of random data
- if(auxv[1]) {
- rnd = (byte*)auxv[1];
- runtime·randomNumber = rnd[4] | rnd[5]<<8 | rnd[6]<<16 | rnd[7]<<24;
- }
- break;
- case AT_PLATFORM: // v5l, v6l, v7l
- if(auxv[1]) {
- t = *(uint8*)(auxv[1]+1);
- if(t >= '5' && t <= '7')
- runtime·armArch = t - '0';
- }
- break;
- case AT_HWCAP: // CPU capability bit flags
- runtime·hwcap = auxv[1];
- break;
- }
- }
-}
-
-#pragma textflag NOSPLIT
-int64
-runtime·cputicks(void)
-{
- // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
- // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
- // runtime·randomNumber provides better seeding of fastrand1.
- return runtime·nanotime() + runtime·randomNumber;
-}
diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go
new file mode 100644
index 000000000..9b0ade614
--- /dev/null
+++ b/src/runtime/os_linux_arm.go
@@ -0,0 +1,71 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+const (
+ _AT_NULL = 0
+ _AT_PLATFORM = 15 // introduced in at least 2.6.11
+ _AT_HWCAP = 16 // introduced in at least 2.6.11
+ _AT_RANDOM = 25 // introduced in 2.6.29
+
+ _HWCAP_VFP = 1 << 6 // introduced in at least 2.6.11
+ _HWCAP_VFPv3 = 1 << 13 // introduced in 2.6.30
+)
+
+var randomNumber uint32
+var armArch uint8 = 6 // we default to ARMv6
+var hwcap uint32 // set by setup_auxv
+var goarm uint8 // set by 5l
+
+func checkgoarm() {
+ if goarm > 5 && hwcap&_HWCAP_VFP == 0 {
+ print("runtime: this CPU has no floating point hardware, so it cannot run\n")
+ print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n")
+ exit(1)
+ }
+ if goarm > 6 && hwcap&_HWCAP_VFPv3 == 0 {
+ print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n")
+		print("this GOARM=", goarm, " binary. Recompile using GOARM=6.\n")
+ exit(1)
+ }
+}
+
+//go:nosplit
+func setup_auxv(argc int32, argv **byte) {
+ // skip over argv, envv to get to auxv
+ n := argc + 1
+ for argv_index(argv, n) != nil {
+ n++
+ }
+ n++
+ auxv := (*[1 << 28]uint32)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+
+ for i := 0; auxv[i] != _AT_NULL; i += 2 {
+ switch auxv[i] {
+ case _AT_RANDOM: // kernel provided 16-byte worth of random data
+ if auxv[i+1] != 0 {
+ randomNumber = *(*uint32)(unsafe.Pointer(uintptr(auxv[i+1])))
+ }
+
+ case _AT_PLATFORM: // v5l, v6l, v7l
+ t := *(*uint8)(unsafe.Pointer(uintptr(auxv[i+1] + 1)))
+ if '5' <= t && t <= '7' {
+ armArch = t - '0'
+ }
+
+ case _AT_HWCAP: // CPU capability bit flags
+ hwcap = auxv[i+1]
+ }
+ }
+}
+
+func cputicks() int64 {
+ // Currently cputicks() is used in blocking profiler and to seed fastrand1().
+ // nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
+ // randomNumber provides better seeding of fastrand1.
+ return nanotime() + int64(randomNumber)
+}
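
checkgoarm above enforces that a binary compiled for a given GOARM level only runs when the kernel-reported HWCAP bits include the floating-point hardware that level assumes (VFP for GOARM>5, VFPv3 for GOARM>6). A standalone sketch of the same policy, returning an error instead of exiting; the names here are illustrative, not runtime API:

	package main

	import "fmt"

	const (
		hwcapVFP   = 1 << 6  // VFP present
		hwcapVFPv3 = 1 << 13 // VFPv3 present
	)

	// goarmSupported reports whether a binary built for the given GOARM
	// level can run with the given HWCAP bits. Sketch only.
	func goarmSupported(goarm uint8, hwcap uint32) error {
		if goarm > 5 && hwcap&hwcapVFP == 0 {
			return fmt.Errorf("GOARM=%d binary needs VFP; recompile with GOARM=5", goarm)
		}
		if goarm > 6 && hwcap&hwcapVFPv3 == 0 {
			return fmt.Errorf("GOARM=%d binary needs VFPv3; recompile with GOARM=6", goarm)
		}
		return nil
	}

	func main() {
		fmt.Println(goarmSupported(7, hwcapVFP)) // VFPv3 missing
	}
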
diff --git a/src/runtime/os_openbsd.c b/src/runtime/os_openbsd.c
deleted file mode 100644
index 960aaffff..000000000
--- a/src/runtime/os_openbsd.c
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-#include "stack.h"
-#include "textflag.h"
-
-enum
-{
- ESRCH = 3,
- EAGAIN = 35,
- EWOULDBLOCK = EAGAIN,
- ENOTSUP = 91,
-
- // From OpenBSD's sys/time.h
- CLOCK_REALTIME = 0,
- CLOCK_VIRTUAL = 1,
- CLOCK_PROF = 2,
- CLOCK_MONOTONIC = 3
-};
-
-extern SigTab runtime·sigtab[];
-
-static Sigset sigset_none;
-static Sigset sigset_all = ~(Sigset)0;
-
-extern int32 runtime·tfork(TforkT *param, uintptr psize, M *mp, G *gp, void (*fn)(void));
-extern int32 runtime·thrsleep(void *ident, int32 clock_id, void *tsp, void *lock, const int32 *abort);
-extern int32 runtime·thrwakeup(void *ident, int32 n);
-
-// From OpenBSD's <sys/sysctl.h>
-#define CTL_HW 6
-#define HW_NCPU 3
-
-static int32
-getncpu(void)
-{
- uint32 mib[2];
- uint32 out;
- int32 ret;
- uintptr nout;
-
- // Fetch hw.ncpu via sysctl.
- mib[0] = CTL_HW;
- mib[1] = HW_NCPU;
- nout = sizeof out;
- out = 0;
- ret = runtime·sysctl(mib, 2, (byte*)&out, &nout, nil, 0);
- if(ret >= 0)
- return out;
- else
- return 1;
-}
-
-#pragma textflag NOSPLIT
-uintptr
-runtime·semacreate(void)
-{
- return 1;
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·semasleep(int64 ns)
-{
- Timespec ts, *tsp = nil;
-
- // Compute sleep deadline.
- if(ns >= 0) {
- int32 nsec;
- ns += runtime·nanotime();
- ts.tv_sec = runtime·timediv(ns, 1000000000, &nsec);
- ts.tv_nsec = nsec; // tv_nsec is int64 on amd64
- tsp = &ts;
- }
-
- for(;;) {
- int32 ret;
-
- // spin-mutex lock
- while(runtime·xchg(&g->m->waitsemalock, 1))
- runtime·osyield();
-
- if(g->m->waitsemacount != 0) {
- // semaphore is available.
- g->m->waitsemacount--;
- // spin-mutex unlock
- runtime·atomicstore(&g->m->waitsemalock, 0);
- return 0; // semaphore acquired
- }
-
- // sleep until semaphore != 0 or timeout.
- // thrsleep unlocks m->waitsemalock.
- ret = runtime·thrsleep(&g->m->waitsemacount, CLOCK_MONOTONIC, tsp, &g->m->waitsemalock, (int32 *)&g->m->waitsemacount);
- if(ret == EWOULDBLOCK)
- return -1;
- }
-}
-
-static void badsemawakeup(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·semawakeup(M *mp)
-{
- uint32 ret;
- void *oldptr;
- uint32 oldscalar;
- void (*fn)(void);
-
- // spin-mutex lock
- while(runtime·xchg(&mp->waitsemalock, 1))
- runtime·osyield();
- mp->waitsemacount++;
- ret = runtime·thrwakeup(&mp->waitsemacount, 1);
- if(ret != 0 && ret != ESRCH) {
- // semawakeup can be called on signal stack.
- // Save old ptrarg/scalararg so we can restore them.
- oldptr = g->m->ptrarg[0];
- oldscalar = g->m->scalararg[0];
- g->m->ptrarg[0] = mp;
- g->m->scalararg[0] = ret;
- fn = badsemawakeup;
- if(g == g->m->gsignal)
- fn();
- else
- runtime·onM(&fn);
- g->m->ptrarg[0] = oldptr;
- g->m->scalararg[0] = oldscalar;
- }
- // spin-mutex unlock
- runtime·atomicstore(&mp->waitsemalock, 0);
-}
-
-static void
-badsemawakeup(void)
-{
- M *mp;
- int32 ret;
-
- mp = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- ret = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
-
- runtime·printf("thrwakeup addr=%p sem=%d ret=%d\n", &mp->waitsemacount, mp->waitsemacount, ret);
-}
-
-void
-runtime·newosproc(M *mp, void *stk)
-{
- TforkT param;
- Sigset oset;
- int32 ret;
-
- if(0) {
- runtime·printf(
- "newosproc stk=%p m=%p g=%p id=%d/%d ostk=%p\n",
- stk, mp, mp->g0, mp->id, (int32)mp->tls[0], &mp);
- }
-
- mp->tls[0] = mp->id; // so 386 asm can find it
-
- param.tf_tcb = (byte*)&mp->tls[0];
- param.tf_tid = (int32*)&mp->procid;
- param.tf_stack = stk;
-
- oset = runtime·sigprocmask(SIG_SETMASK, sigset_all);
- ret = runtime·tfork(&param, sizeof(param), mp, mp->g0, runtime·mstart);
- runtime·sigprocmask(SIG_SETMASK, oset);
-
- if(ret < 0) {
- runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount() - 1, -ret);
- if (ret == -ENOTSUP)
- runtime·printf("runtime: is kern.rthreads disabled?\n");
- runtime·throw("runtime.newosproc");
- }
-}
-
-void
-runtime·osinit(void)
-{
- runtime·ncpu = getncpu();
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·get_random_data(byte **rnd, int32 *rnd_len)
-{
- #pragma dataflag NOPTR
- static byte urandom_data[HashRandomBytes];
- int32 fd;
- fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
- if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
- *rnd = urandom_data;
- *rnd_len = HashRandomBytes;
- } else {
- *rnd = nil;
- *rnd_len = 0;
- }
- runtime·close(fd);
-}
-
-void
-runtime·goenvs(void)
-{
- runtime·goenvs_unix();
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime·mpreinit(M *mp)
-{
- mp->gsignal = runtime·malg(32*1024);
- runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
-
- mp->gsignal->m = mp;
- runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime·minit(void)
-{
- // Initialize signal handling
- runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
- runtime·sigprocmask(SIG_SETMASK, sigset_none);
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime·unminit(void)
-{
- runtime·signalstack(nil, 0);
-}
-
-uintptr
-runtime·memlimit(void)
-{
- return 0;
-}
-
-extern void runtime·sigtramp(void);
-
-typedef struct sigaction {
- union {
- void (*__sa_handler)(int32);
- void (*__sa_sigaction)(int32, Siginfo*, void *);
- } __sigaction_u; /* signal handler */
- uint32 sa_mask; /* signal mask to apply */
- int32 sa_flags; /* see signal options below */
-} SigactionT;
-
-void
-runtime·setsig(int32 i, GoSighandler *fn, bool restart)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
- if(restart)
- sa.sa_flags |= SA_RESTART;
- sa.sa_mask = ~0U;
- if(fn == runtime·sighandler)
- fn = (void*)runtime·sigtramp;
- sa.__sigaction_u.__sa_sigaction = (void*)fn;
- runtime·sigaction(i, &sa, nil);
-}
-
-GoSighandler*
-runtime·getsig(int32 i)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- runtime·sigaction(i, nil, &sa);
- if((void*)sa.__sigaction_u.__sa_sigaction == runtime·sigtramp)
- return runtime·sighandler;
- return (void*)sa.__sigaction_u.__sa_sigaction;
-}
-
-void
-runtime·signalstack(byte *p, int32 n)
-{
- StackT st;
-
- st.ss_sp = (void*)p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- runtime·sigaltstack(&st, nil);
-}
-
-void
-runtime·unblocksignals(void)
-{
- runtime·sigprocmask(SIG_SETMASK, sigset_none);
-}
-
-#pragma textflag NOSPLIT
-int8*
-runtime·signame(int32 sig)
-{
- return runtime·sigtab[sig].name;
-}
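
The OpenBSD semasleep/semawakeup pair above implements a counting semaphore as a per-M counter guarded by a spin "mutex" (an atomically exchanged flag), with thrsleep/thrwakeup doing the actual blocking. The sketch below mirrors only the locking-and-count shape in portable Go, using CAS and Gosched as stand-ins for xchg/osyield and skipping the kernel sleep entirely; it is not the runtime's code:

	package main

	import (
		"fmt"
		"runtime"
		"sync/atomic"
	)

	// sema is an illustrative counting semaphore: a counter protected by
	// a spin-mutex, in the shape of waitsemacount/waitsemalock.
	type sema struct {
		lock  uint32 // spin-mutex: 0 unlocked, 1 locked
		count uint32
	}

	func (s *sema) acquireSpinLock() {
		for !atomic.CompareAndSwapUint32(&s.lock, 0, 1) {
			runtime.Gosched() // stand-in for osyield
		}
	}

	func (s *sema) releaseSpinLock() { atomic.StoreUint32(&s.lock, 0) }

	func (s *sema) wakeup() {
		s.acquireSpinLock()
		s.count++
		s.releaseSpinLock()
		// real code: thrwakeup(&count, 1) to wake a sleeping thread
	}

	func (s *sema) trySleep() bool {
		s.acquireSpinLock()
		if s.count != 0 {
			s.count--
			s.releaseSpinLock()
			return true // semaphore acquired
		}
		s.releaseSpinLock()
		// real code: thrsleep unlocks the spin-mutex and blocks here
		return false
	}

	func main() {
		var s sema
		s.wakeup()
		fmt.Println(s.trySleep(), s.trySleep()) // true false
	}
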
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index a000f963e..9e5adcd3d 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -4,14 +4,30 @@
package runtime
-import "unsafe"
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
+//go:noescape
+func sigaction(sig int32, new, old *sigactiont)
+
+//go:noescape
+func sigaltstack(new, old *stackt)
+
+//go:noescape
func sigprocmask(mode int32, new uint32) uint32
+
+//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
+
func raise(sig int32)
-func tfork(param unsafe.Pointer, psize uintptr, mm, gg, fn unsafe.Pointer) int32
-func thrsleep(ident unsafe.Pointer, clock_id int32, tsp, lock, abort unsafe.Pointer) int32
-func thrwakeup(ident unsafe.Pointer, n int32) int32
+
+//go:noescape
+func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32
+
+//go:noescape
+func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *int32) int32
+
+//go:noescape
+func thrwakeup(ident uintptr, n int32) int32
+
+func osyield()
diff --git a/src/runtime/os_openbsd.h b/src/runtime/os_openbsd.h
deleted file mode 100644
index 6ad98109e..000000000
--- a/src/runtime/os_openbsd.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-
-typedef byte* kevent_udata;
-
-struct sigaction;
-
-void runtime·sigpanic(void);
-
-void runtime·setitimer(int32, Itimerval*, Itimerval*);
-void runtime·sigaction(int32, struct sigaction*, struct sigaction*);
-void runtime·sigaltstack(SigaltstackT*, SigaltstackT*);
-Sigset runtime·sigprocmask(int32, Sigset);
-void runtime·unblocksignals(void);
-int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
-
-enum {
- SS_DISABLE = 4,
- SIG_BLOCK = 1,
- SIG_UNBLOCK = 2,
- SIG_SETMASK = 3,
- NSIG = 33,
- SI_USER = 0,
-};
diff --git a/src/runtime/os_solaris.c b/src/runtime/os_solaris.c
deleted file mode 100644
index bee91d8e6..000000000
--- a/src/runtime/os_solaris.c
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-#include "stack.h"
-#include "textflag.h"
-
-#pragma dynexport runtime·end _end
-#pragma dynexport runtime·etext _etext
-#pragma dynexport runtime·edata _edata
-
-#pragma dynimport libc·___errno ___errno "libc.so"
-#pragma dynimport libc·clock_gettime clock_gettime "libc.so"
-#pragma dynimport libc·close close "libc.so"
-#pragma dynimport libc·exit exit "libc.so"
-#pragma dynimport libc·fstat fstat "libc.so"
-#pragma dynimport libc·getcontext getcontext "libc.so"
-#pragma dynimport libc·getrlimit getrlimit "libc.so"
-#pragma dynimport libc·malloc malloc "libc.so"
-#pragma dynimport libc·mmap mmap "libc.so"
-#pragma dynimport libc·munmap munmap "libc.so"
-#pragma dynimport libc·open open "libc.so"
-#pragma dynimport libc·pthread_attr_destroy pthread_attr_destroy "libc.so"
-#pragma dynimport libc·pthread_attr_getstack pthread_attr_getstack "libc.so"
-#pragma dynimport libc·pthread_attr_init pthread_attr_init "libc.so"
-#pragma dynimport libc·pthread_attr_setdetachstate pthread_attr_setdetachstate "libc.so"
-#pragma dynimport libc·pthread_attr_setstack pthread_attr_setstack "libc.so"
-#pragma dynimport libc·pthread_create pthread_create "libc.so"
-#pragma dynimport libc·raise raise "libc.so"
-#pragma dynimport libc·read read "libc.so"
-#pragma dynimport libc·select select "libc.so"
-#pragma dynimport libc·sched_yield sched_yield "libc.so"
-#pragma dynimport libc·sem_init sem_init "libc.so"
-#pragma dynimport libc·sem_post sem_post "libc.so"
-#pragma dynimport libc·sem_reltimedwait_np sem_reltimedwait_np "libc.so"
-#pragma dynimport libc·sem_wait sem_wait "libc.so"
-#pragma dynimport libc·setitimer setitimer "libc.so"
-#pragma dynimport libc·sigaction sigaction "libc.so"
-#pragma dynimport libc·sigaltstack sigaltstack "libc.so"
-#pragma dynimport libc·sigprocmask sigprocmask "libc.so"
-#pragma dynimport libc·sysconf sysconf "libc.so"
-#pragma dynimport libc·usleep usleep "libc.so"
-#pragma dynimport libc·write write "libc.so"
-
-extern uintptr libc·___errno;
-extern uintptr libc·clock_gettime;
-extern uintptr libc·close;
-extern uintptr libc·exit;
-extern uintptr libc·fstat;
-extern uintptr libc·getcontext;
-extern uintptr libc·getrlimit;
-extern uintptr libc·malloc;
-extern uintptr libc·mmap;
-extern uintptr libc·munmap;
-extern uintptr libc·open;
-extern uintptr libc·pthread_attr_destroy;
-extern uintptr libc·pthread_attr_getstack;
-extern uintptr libc·pthread_attr_init;
-extern uintptr libc·pthread_attr_setdetachstate;
-extern uintptr libc·pthread_attr_setstack;
-extern uintptr libc·pthread_create;
-extern uintptr libc·raise;
-extern uintptr libc·read;
-extern uintptr libc·sched_yield;
-extern uintptr libc·select;
-extern uintptr libc·sem_init;
-extern uintptr libc·sem_post;
-extern uintptr libc·sem_reltimedwait_np;
-extern uintptr libc·sem_wait;
-extern uintptr libc·setitimer;
-extern uintptr libc·sigaction;
-extern uintptr libc·sigaltstack;
-extern uintptr libc·sigprocmask;
-extern uintptr libc·sysconf;
-extern uintptr libc·usleep;
-extern uintptr libc·write;
-
-void runtime·getcontext(Ucontext *context);
-int32 runtime·pthread_attr_destroy(PthreadAttr* attr);
-int32 runtime·pthread_attr_init(PthreadAttr* attr);
-int32 runtime·pthread_attr_getstack(PthreadAttr* attr, void** addr, uint64* size);
-int32 runtime·pthread_attr_setdetachstate(PthreadAttr* attr, int32 state);
-int32 runtime·pthread_attr_setstack(PthreadAttr* attr, void* addr, uint64 size);
-int32 runtime·pthread_create(Pthread* thread, PthreadAttr* attr, void(*fn)(void), void *arg);
-uint32 runtime·tstart_sysvicall(M *newm);
-int32 runtime·sem_init(SemT* sem, int32 pshared, uint32 value);
-int32 runtime·sem_post(SemT* sem);
-int32 runtime·sem_reltimedwait_np(SemT* sem, Timespec* timeout);
-int32 runtime·sem_wait(SemT* sem);
-int64 runtime·sysconf(int32 name);
-
-extern SigTab runtime·sigtab[];
-static Sigset sigset_none;
-static Sigset sigset_all = { ~(uint32)0, ~(uint32)0, ~(uint32)0, ~(uint32)0, };
-
-static int32
-getncpu(void)
-{
- int32 n;
-
- n = (int32)runtime·sysconf(_SC_NPROCESSORS_ONLN);
- if(n < 1)
- return 1;
- return n;
-}
-
-void
-runtime·osinit(void)
-{
- runtime·ncpu = getncpu();
-}
-
-void
-runtime·newosproc(M *mp, void *stk)
-{
- PthreadAttr attr;
- Sigset oset;
- Pthread tid;
- int32 ret;
- uint64 size;
-
- USED(stk);
- if(runtime·pthread_attr_init(&attr) != 0)
- runtime·throw("pthread_attr_init");
- if(runtime·pthread_attr_setstack(&attr, 0, 0x200000) != 0)
- runtime·throw("pthread_attr_setstack");
- size = 0;
- if(runtime·pthread_attr_getstack(&attr, (void**)&mp->g0->stack.hi, &size) != 0)
- runtime·throw("pthread_attr_getstack");
- mp->g0->stack.lo = mp->g0->stack.hi - size;
- if(runtime·pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
- runtime·throw("pthread_attr_setdetachstate");
-
- // Disable signals during create, so that the new thread starts
- // with signals disabled. It will enable them in minit.
- runtime·sigprocmask(SIG_SETMASK, &sigset_all, &oset);
- ret = runtime·pthread_create(&tid, &attr, (void (*)(void))runtime·tstart_sysvicall, mp);
- runtime·sigprocmask(SIG_SETMASK, &oset, nil);
- if(ret != 0) {
- runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), ret);
- runtime·throw("runtime.newosproc");
- }
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·get_random_data(byte **rnd, int32 *rnd_len)
-{
- #pragma dataflag NOPTR
- static byte urandom_data[HashRandomBytes];
- int32 fd;
- fd = runtime·open("/dev/urandom", 0 /* O_RDONLY */, 0);
- if(runtime·read(fd, urandom_data, HashRandomBytes) == HashRandomBytes) {
- *rnd = urandom_data;
- *rnd_len = HashRandomBytes;
- } else {
- *rnd = nil;
- *rnd_len = 0;
- }
- runtime·close(fd);
-}
-
-void
-runtime·goenvs(void)
-{
- runtime·goenvs_unix();
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
-void
-runtime·mpreinit(M *mp)
-{
- mp->gsignal = runtime·malg(32*1024);
- runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
-
- mp->gsignal->m = mp;
- runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
-}
-
-// Called to initialize a new m (including the bootstrap m).
-// Called on the new thread, can not allocate memory.
-void
-runtime·minit(void)
-{
- runtime·asmcgocall(runtime·miniterrno, (void *)libc·___errno);
- // Initialize signal handling
- runtime·signalstack((byte*)g->m->gsignal->stack.lo, 32*1024);
- runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
-}
-
-// Called from dropm to undo the effect of an minit.
-void
-runtime·unminit(void)
-{
- runtime·signalstack(nil, 0);
-}
-
-uintptr
-runtime·memlimit(void)
-{
- Rlimit rl;
- extern byte runtime·text[], runtime·end[];
- uintptr used;
-
- if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
- return 0;
- if(rl.rlim_cur >= 0x7fffffff)
- return 0;
-
- // Estimate our VM footprint excluding the heap.
- // Not an exact science: use size of binary plus
- // some room for thread stacks.
- used = runtime·end - runtime·text + (64<<20);
- if(used >= rl.rlim_cur)
- return 0;
-
- // If there's not at least 16 MB left, we're probably
- // not going to be able to do much. Treat as no limit.
- rl.rlim_cur -= used;
- if(rl.rlim_cur < (16<<20))
- return 0;
-
- return rl.rlim_cur - used;
-}
-
-void
-runtime·setprof(bool on)
-{
- USED(on);
-}
-
-extern void runtime·sigtramp(void);
-
-void
-runtime·setsig(int32 i, GoSighandler *fn, bool restart)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- sa.sa_flags = SA_SIGINFO|SA_ONSTACK;
- if(restart)
- sa.sa_flags |= SA_RESTART;
- sa.sa_mask.__sigbits[0] = ~(uint32)0;
- sa.sa_mask.__sigbits[1] = ~(uint32)0;
- sa.sa_mask.__sigbits[2] = ~(uint32)0;
- sa.sa_mask.__sigbits[3] = ~(uint32)0;
- if(fn == runtime·sighandler)
- fn = (void*)runtime·sigtramp;
- *((void**)&sa._funcptr[0]) = (void*)fn;
- runtime·sigaction(i, &sa, nil);
-}
-
-GoSighandler*
-runtime·getsig(int32 i)
-{
- SigactionT sa;
-
- runtime·memclr((byte*)&sa, sizeof sa);
- runtime·sigaction(i, nil, &sa);
- if(*((void**)&sa._funcptr[0]) == runtime·sigtramp)
- return runtime·sighandler;
- return *((void**)&sa._funcptr[0]);
-}
-
-void
-runtime·signalstack(byte *p, int32 n)
-{
- StackT st;
-
- st.ss_sp = (void*)p;
- st.ss_size = n;
- st.ss_flags = 0;
- if(p == nil)
- st.ss_flags = SS_DISABLE;
- runtime·sigaltstack(&st, nil);
-}
-
-void
-runtime·unblocksignals(void)
-{
- runtime·sigprocmask(SIG_SETMASK, &sigset_none, nil);
-}
-
-#pragma textflag NOSPLIT
-uintptr
-runtime·semacreate(void)
-{
- SemT* sem;
-
- // Call libc's malloc rather than runtime·malloc. This will
- // allocate space on the C heap. We can't call runtime·malloc
- // here because it could cause a deadlock.
- g->m->libcall.fn = (uintptr)(void*)libc·malloc;
- g->m->libcall.n = 1;
- runtime·memclr((byte*)&g->m->scratch, sizeof(g->m->scratch));
- g->m->scratch.v[0] = (uintptr)sizeof(*sem);
- g->m->libcall.args = (uintptr)(uintptr*)&g->m->scratch;
- runtime·asmcgocall(runtime·asmsysvicall6, &g->m->libcall);
- sem = (void*)g->m->libcall.r1;
- if(runtime·sem_init(sem, 0, 0) != 0)
- runtime·throw("sem_init");
- return (uintptr)sem;
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·semasleep(int64 ns)
-{
- M *m;
-
- m = g->m;
- if(ns >= 0) {
- m->ts.tv_sec = ns / 1000000000LL;
- m->ts.tv_nsec = ns % 1000000000LL;
-
- m->libcall.fn = (uintptr)(void*)libc·sem_reltimedwait_np;
- m->libcall.n = 2;
- runtime·memclr((byte*)&m->scratch, sizeof(m->scratch));
- m->scratch.v[0] = m->waitsema;
- m->scratch.v[1] = (uintptr)&m->ts;
- m->libcall.args = (uintptr)(uintptr*)&m->scratch;
- runtime·asmcgocall(runtime·asmsysvicall6, &m->libcall);
- if(*m->perrno != 0) {
- if(*m->perrno == ETIMEDOUT || *m->perrno == EAGAIN || *m->perrno == EINTR)
- return -1;
- runtime·throw("sem_reltimedwait_np");
- }
- return 0;
- }
- for(;;) {
- m->libcall.fn = (uintptr)(void*)libc·sem_wait;
- m->libcall.n = 1;
- runtime·memclr((byte*)&m->scratch, sizeof(m->scratch));
- m->scratch.v[0] = m->waitsema;
- m->libcall.args = (uintptr)(uintptr*)&m->scratch;
- runtime·asmcgocall(runtime·asmsysvicall6, &m->libcall);
- if(m->libcall.r1 == 0)
- break;
- if(*m->perrno == EINTR)
- continue;
- runtime·throw("sem_wait");
- }
- return 0;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·semawakeup(M *mp)
-{
- SemT* sem = (SemT*)mp->waitsema;
- if(runtime·sem_post(sem) != 0)
- runtime·throw("sem_post");
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·close(int32 fd)
-{
- return runtime·sysvicall1(libc·close, (uintptr)fd);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·exit(int32 r)
-{
- runtime·sysvicall1(libc·exit, (uintptr)r);
-}
-
-#pragma textflag NOSPLIT
-/* int32 */ void
-runtime·getcontext(Ucontext* context)
-{
- runtime·sysvicall1(libc·getcontext, (uintptr)context);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·getrlimit(int32 res, Rlimit* rlp)
-{
- return runtime·sysvicall2(libc·getrlimit, (uintptr)res, (uintptr)rlp);
-}
-
-#pragma textflag NOSPLIT
-uint8*
-runtime·mmap(byte* addr, uintptr len, int32 prot, int32 flags, int32 fildes, uint32 off)
-{
- return (uint8*)runtime·sysvicall6(libc·mmap, (uintptr)addr, (uintptr)len, (uintptr)prot, (uintptr)flags, (uintptr)fildes, (uintptr)off);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·munmap(byte* addr, uintptr len)
-{
- runtime·sysvicall2(libc·munmap, (uintptr)addr, (uintptr)len);
-}
-
-extern int64 runtime·nanotime1(void);
-#pragma textflag NOSPLIT
-int64
-runtime·nanotime(void)
-{
- return runtime·sysvicall0((uintptr)runtime·nanotime1);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·open(int8* path, int32 oflag, int32 mode)
-{
- return runtime·sysvicall3(libc·open, (uintptr)path, (uintptr)oflag, (uintptr)mode);
-}
-
-int32
-runtime·pthread_attr_destroy(PthreadAttr* attr)
-{
- return runtime·sysvicall1(libc·pthread_attr_destroy, (uintptr)attr);
-}
-
-int32
-runtime·pthread_attr_getstack(PthreadAttr* attr, void** addr, uint64* size)
-{
- return runtime·sysvicall3(libc·pthread_attr_getstack, (uintptr)attr, (uintptr)addr, (uintptr)size);
-}
-
-int32
-runtime·pthread_attr_init(PthreadAttr* attr)
-{
- return runtime·sysvicall1(libc·pthread_attr_init, (uintptr)attr);
-}
-
-int32
-runtime·pthread_attr_setdetachstate(PthreadAttr* attr, int32 state)
-{
- return runtime·sysvicall2(libc·pthread_attr_setdetachstate, (uintptr)attr, (uintptr)state);
-}
-
-int32
-runtime·pthread_attr_setstack(PthreadAttr* attr, void* addr, uint64 size)
-{
- return runtime·sysvicall3(libc·pthread_attr_setstack, (uintptr)attr, (uintptr)addr, (uintptr)size);
-}
-
-int32
-runtime·pthread_create(Pthread* thread, PthreadAttr* attr, void(*fn)(void), void *arg)
-{
- return runtime·sysvicall4(libc·pthread_create, (uintptr)thread, (uintptr)attr, (uintptr)fn, (uintptr)arg);
-}
-
-/* int32 */ void
-runtime·raise(int32 sig)
-{
- runtime·sysvicall1(libc·raise, (uintptr)sig);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·read(int32 fd, void* buf, int32 nbyte)
-{
- return runtime·sysvicall3(libc·read, (uintptr)fd, (uintptr)buf, (uintptr)nbyte);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·sem_init(SemT* sem, int32 pshared, uint32 value)
-{
- return runtime·sysvicall3(libc·sem_init, (uintptr)sem, (uintptr)pshared, (uintptr)value);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·sem_post(SemT* sem)
-{
- return runtime·sysvicall1(libc·sem_post, (uintptr)sem);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·sem_reltimedwait_np(SemT* sem, Timespec* timeout)
-{
- return runtime·sysvicall2(libc·sem_reltimedwait_np, (uintptr)sem, (uintptr)timeout);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·sem_wait(SemT* sem)
-{
- return runtime·sysvicall1(libc·sem_wait, (uintptr)sem);
-}
-
-/* int32 */ void
-runtime·setitimer(int32 which, Itimerval* value, Itimerval* ovalue)
-{
- runtime·sysvicall3(libc·setitimer, (uintptr)which, (uintptr)value, (uintptr)ovalue);
-}
-
-/* int32 */ void
-runtime·sigaction(int32 sig, struct SigactionT* act, struct SigactionT* oact)
-{
- runtime·sysvicall3(libc·sigaction, (uintptr)sig, (uintptr)act, (uintptr)oact);
-}
-
-/* int32 */ void
-runtime·sigaltstack(SigaltstackT* ss, SigaltstackT* oss)
-{
- runtime·sysvicall2(libc·sigaltstack, (uintptr)ss, (uintptr)oss);
-}
-
-/* int32 */ void
-runtime·sigprocmask(int32 how, Sigset* set, Sigset* oset)
-{
- runtime·sysvicall3(libc·sigprocmask, (uintptr)how, (uintptr)set, (uintptr)oset);
-}
-
-int64
-runtime·sysconf(int32 name)
-{
- return runtime·sysvicall1(libc·sysconf, (uintptr)name);
-}
-
-extern void runtime·usleep1(uint32);
-
-#pragma textflag NOSPLIT
-void
-runtime·usleep(uint32 µs)
-{
- runtime·usleep1(µs);
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·write(uintptr fd, void* buf, int32 nbyte)
-{
- return runtime·sysvicall3(libc·write, (uintptr)fd, (uintptr)buf, (uintptr)nbyte);
-}
-
-extern void runtime·osyield1(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·osyield(void)
-{
- // Check the validity of m because we might be called in cgo callback
- // path early enough where there isn't a m available yet.
- if(g && g->m != nil) {
- runtime·sysvicall0(libc·sched_yield);
- return;
- }
- runtime·osyield1();
-}
-
-#pragma textflag NOSPLIT
-int8*
-runtime·signame(int32 sig)
-{
- return runtime·sigtab[sig].name;
-}
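The deleted file above ends with the Solaris semaphore primitives that back runtime locks: semasleep retries sem_reltimedwait_np/sem_wait on EINTR and reports -1 on timeout, and semawakeup posts the semaphore. Below is a minimal, self-contained sketch of that sleep/wakeup contract using an ordinary Go channel in place of the libc semaphore; the names sema, sleep, and wakeup are illustrative stand-ins, not runtime API.

package main

import (
    "fmt"
    "time"
)

type sema chan struct{}

func newSema() sema { return make(sema, 1) }

// sleep blocks until a wakeup arrives or ns nanoseconds pass.
// It returns 0 on wakeup and -1 on timeout, mirroring the semasleep contract.
func (s sema) sleep(ns int64) int32 {
    if ns < 0 {
        <-s // wait forever, like the sem_wait loop in the deleted code
        return 0
    }
    select {
    case <-s:
        return 0
    case <-time.After(time.Duration(ns)):
        return -1
    }
}

// wakeup posts the semaphore, mirroring semawakeup.
func (s sema) wakeup() {
    select {
    case s <- struct{}{}:
    default: // already posted
    }
}

func main() {
    s := newSema()
    go func() { time.Sleep(10 * time.Millisecond); s.wakeup() }()
    fmt.Println(s.sleep(int64(time.Second)))          // 0: woken before the timeout
    fmt.Println(s.sleep(int64(20 * time.Millisecond))) // -1: no wakeup, times out
}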
diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go
index ca1315120..6864ef938 100644
--- a/src/runtime/os_solaris.go
+++ b/src/runtime/os_solaris.go
@@ -6,53 +6,35 @@ package runtime
import "unsafe"
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
-func sigprocmask(mode int32, new, old unsafe.Pointer)
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func getrlimit(kind int32, limit unsafe.Pointer)
-func miniterrno(fn unsafe.Pointer)
-func raise(sig int32)
-func getcontext(ctxt unsafe.Pointer)
-func tstart_sysvicall(mm unsafe.Pointer) uint32
-func nanotime1() int64
-func usleep1(usec uint32)
-func osyield1()
-func netpollinit()
-func netpollopen(fd uintptr, pd *pollDesc) int32
-func netpollclose(fd uintptr) int32
-func netpollarm(pd *pollDesc, mode int)
-
-type libcFunc byte
+type libcFunc uintptr
var asmsysvicall6 libcFunc
//go:nosplit
-func sysvicall0(fn *libcFunc) uintptr {
+func sysvicall0(fn libcFunc) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 0
- // TODO(rsc): Why is noescape necessary here and below?
- libcall.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes
+ libcall.args = uintptr(fn) // it's unused but must be non-nil, otherwise crashes
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
return libcall.r1
}
//go:nosplit
-func sysvicall1(fn *libcFunc, a1 uintptr) uintptr {
+func sysvicall1(fn libcFunc, a1 uintptr) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 1
+ // TODO(rsc): Why is noescape necessary here and below?
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
return libcall.r1
}
//go:nosplit
-func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
+func sysvicall2(fn libcFunc, a1, a2 uintptr) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 2
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
@@ -60,9 +42,9 @@ func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
}
//go:nosplit
-func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
+func sysvicall3(fn libcFunc, a1, a2, a3 uintptr) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 3
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
@@ -70,9 +52,9 @@ func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
}
//go:nosplit
-func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
+func sysvicall4(fn libcFunc, a1, a2, a3, a4 uintptr) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 4
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
@@ -80,9 +62,9 @@ func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
}
//go:nosplit
-func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
+func sysvicall5(fn libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 5
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
@@ -90,9 +72,9 @@ func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
}
//go:nosplit
-func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
+func sysvicall6(fn libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
+ libcall.fn = uintptr(fn)
libcall.n = 6
libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
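The os_solaris.go change above turns libcFunc into a plain uintptr while keeping the pattern of filling a libcall descriptor {fn, n, args} that a single trampoline (asmsysvicall6) dispatches. The following self-contained sketch shows that descriptor-dispatch shape with the trampoline replaced by a Go map and the C entry points replaced by invented uintptr keys; nothing in it is runtime API, and the args field is kept as a real unsafe.Pointer here, whereas the runtime's libcall stores a uintptr and relies on //go:nosplit and noescape to keep that safe.

package main

import (
    "fmt"
    "unsafe"
)

type libcall struct {
    fn   uintptr        // "address" of the function to call
    n    uintptr        // number of arguments
    args unsafe.Pointer // pointer to the first argument (uintptr in the real struct)
    r1   uintptr        // first return value
}

// Registry standing in for the C symbol table reached through asmsysvicall6.
var table = map[uintptr]func(args []uintptr) uintptr{}

// Trampoline stand-in: recover the argument block and dispatch on fn.
func callTrampoline(c *libcall) {
    argv := unsafe.Slice((*uintptr)(c.args), c.n)
    c.r1 = table[c.fn](argv)
}

// sysvicall2-shaped wrapper: fn is now a plain uintptr, as in the diff above.
func sysvicall2(fn uintptr, a1, a2 uintptr) uintptr {
    buf := [2]uintptr{a1, a2}
    c := libcall{fn: fn, n: 2, args: unsafe.Pointer(&buf[0])}
    callTrampoline(&c)
    return c.r1
}

func main() {
    const libcAdd uintptr = 1 // invented "address" of an add routine
    table[libcAdd] = func(a []uintptr) uintptr { return a[0] + a[1] }
    fmt.Println(sysvicall2(libcAdd, 40, 2)) // 42
}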
diff --git a/src/runtime/os_solaris.h b/src/runtime/os_solaris.h
deleted file mode 100644
index 3d9e1a240..000000000
--- a/src/runtime/os_solaris.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-
-typedef uintptr kevent_udata;
-
-struct sigaction;
-
-void runtime·sigpanic(void);
-
-void runtime·setitimer(int32, Itimerval*, Itimerval*);
-void runtime·sigaction(int32, struct SigactionT*, struct SigactionT*);
-void runtime·sigaltstack(SigaltstackT*, SigaltstackT*);
-void runtime·sigprocmask(int32, Sigset*, Sigset*);
-void runtime·unblocksignals(void);
-int32 runtime·sysctl(uint32*, uint32, byte*, uintptr*, byte*, uintptr);
-
-
-void runtime·raisesigpipe(void);
-void runtime·setsig(int32, void(*)(int32, Siginfo*, void*, G*), bool);
-void runtime·sighandler(int32 sig, Siginfo *info, void *context, G *gp);
-void runtime·sigpanic(void);
-
-enum {
- SS_DISABLE = 2,
- SIG_BLOCK = 1,
- SIG_UNBLOCK = 2,
- SIG_SETMASK = 3,
- NSIG = 73, /* number of signals in runtime·SigTab array */
- SI_USER = 0,
- _UC_SIGMASK = 0x01,
- _UC_CPU = 0x04,
- RLIMIT_AS = 10,
-};
-
-typedef struct Rlimit Rlimit;
-struct Rlimit {
- int64 rlim_cur;
- int64 rlim_max;
-};
-int32 runtime·getrlimit(int32, Rlimit*);
-
-// Call an external library function described by {fn, a0, ..., an}, with
-// SysV conventions, switching to os stack during the call, if necessary.
-uintptr runtime·sysvicall0(uintptr fn);
-uintptr runtime·sysvicall1(uintptr fn, uintptr a1);
-uintptr runtime·sysvicall2(uintptr fn, uintptr a1, uintptr a2);
-uintptr runtime·sysvicall3(uintptr fn, uintptr a1, uintptr a2, uintptr a3);
-uintptr runtime·sysvicall4(uintptr fn, uintptr a1, uintptr a2, uintptr a3, uintptr a4);
-uintptr runtime·sysvicall5(uintptr fn, uintptr a1, uintptr a2, uintptr a3, uintptr a4, uintptr a5);
-uintptr runtime·sysvicall6(uintptr fn, uintptr a1, uintptr a2, uintptr a3, uintptr a4, uintptr a5, uintptr a6);
-void runtime·asmsysvicall6(void *c);
-
-void runtime·miniterrno(void *fn);
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index 1528d2fd1..fcd8f44cc 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -21,10 +21,6 @@ func asmstdcall(fn unsafe.Pointer)
func getlasterror() uint32
func setlasterror(err uint32)
func usleep1(usec uint32)
-func netpollinit()
-func netpollopen(fd uintptr, pd *pollDesc) int32
-func netpollclose(fd uintptr) int32
-func netpollarm(pd *pollDesc, mode int)
func os_sigpipe() {
gothrow("too many writes on closed pipe")
diff --git a/src/runtime/panic.c b/src/runtime/panic.c
deleted file mode 100644
index b19fdd0e1..000000000
--- a/src/runtime/panic.c
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "stack.h"
-#include "malloc.h"
-#include "textflag.h"
-
-// Code related to defer, panic and recover.
-
-// TODO: remove once code is moved to Go
-extern Defer* runtime·newdefer(int32 siz);
-extern void runtime·freedefer(Defer *d);
-
-uint32 runtime·panicking;
-static Mutex paniclk;
-
-void
-runtime·deferproc_m(void)
-{
- int32 siz;
- FuncVal *fn;
- uintptr argp;
- uintptr callerpc;
- Defer *d;
-
- siz = g->m->scalararg[0];
- fn = g->m->ptrarg[0];
- argp = g->m->scalararg[1];
- callerpc = g->m->scalararg[2];
- g->m->ptrarg[0] = nil;
- g->m->scalararg[1] = 0;
-
- d = runtime·newdefer(siz);
- if(d->panic != nil)
- runtime·throw("deferproc: d->panic != nil after newdefer");
- d->fn = fn;
- d->pc = callerpc;
- d->argp = argp;
- runtime·memmove(d+1, (void*)argp, siz);
-}
-
-// Unwind the stack after a deferred function calls recover
-// after a panic. Then arrange to continue running as though
-// the caller of the deferred function returned normally.
-void
-runtime·recovery_m(G *gp)
-{
- void *argp;
- uintptr pc;
-
- // Info about defer passed in G struct.
- argp = (void*)gp->sigcode0;
- pc = (uintptr)gp->sigcode1;
-
- // d's arguments need to be in the stack.
- if(argp != nil && ((uintptr)argp < gp->stack.lo || gp->stack.hi < (uintptr)argp)) {
- runtime·printf("recover: %p not in [%p, %p]\n", argp, gp->stack.lo, gp->stack.hi);
- runtime·throw("bad recovery");
- }
-
- // Make the deferproc for this d return again,
- // this time returning 1. The calling function will
- // jump to the standard return epilogue.
- // The -2*sizeof(uintptr) makes up for the
- // two extra words that are on the stack at
- // each call to deferproc.
- // (The pc we're returning to does pop pop
- // before it tests the return value.)
- // On the arm and power there are 2 saved LRs mixed in too.
- if(thechar == '5' || thechar == '9')
- gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr);
- else
- gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr);
- gp->sched.pc = pc;
- gp->sched.lr = 0;
- gp->sched.ret = 1;
- runtime·gogo(&gp->sched);
-}
-
-void
-runtime·startpanic_m(void)
-{
- if(runtime·mheap.cachealloc.size == 0) { // very early
- runtime·printf("runtime: panic before malloc heap initialized\n");
- g->m->mallocing = 1; // tell rest of panic not to try to malloc
- } else if(g->m->mcache == nil) // can happen if called from signal handler or throw
- g->m->mcache = runtime·allocmcache();
- switch(g->m->dying) {
- case 0:
- g->m->dying = 1;
- if(g != nil) {
- g->writebuf.array = nil;
- g->writebuf.len = 0;
- g->writebuf.cap = 0;
- }
- runtime·xadd(&runtime·panicking, 1);
- runtime·lock(&paniclk);
- if(runtime·debug.schedtrace > 0 || runtime·debug.scheddetail > 0)
- runtime·schedtrace(true);
- runtime·freezetheworld();
- return;
- case 1:
- // Something failed while panicking, probably the print of the
- // argument to panic(). Just print a stack trace and exit.
- g->m->dying = 2;
- runtime·printf("panic during panic\n");
- runtime·dopanic(0);
- runtime·exit(3);
- case 2:
- // This is a genuine bug in the runtime, we couldn't even
- // print the stack trace successfully.
- g->m->dying = 3;
- runtime·printf("stack trace unavailable\n");
- runtime·exit(4);
- default:
- // Can't even print! Just exit.
- runtime·exit(5);
- }
-}
-
-void
-runtime·dopanic_m(void)
-{
- G *gp;
- uintptr sp, pc;
- static bool didothers;
- bool crash;
- int32 t;
-
- gp = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- pc = g->m->scalararg[0];
- sp = g->m->scalararg[1];
- g->m->scalararg[1] = 0;
- if(gp->sig != 0)
- runtime·printf("[signal %x code=%p addr=%p pc=%p]\n",
- gp->sig, gp->sigcode0, gp->sigcode1, gp->sigpc);
-
- if((t = runtime·gotraceback(&crash)) > 0){
- if(gp != gp->m->g0) {
- runtime·printf("\n");
- runtime·goroutineheader(gp);
- runtime·traceback(pc, sp, 0, gp);
- } else if(t >= 2 || g->m->throwing > 0) {
- runtime·printf("\nruntime stack:\n");
- runtime·traceback(pc, sp, 0, gp);
- }
- if(!didothers) {
- didothers = true;
- runtime·tracebackothers(gp);
- }
- }
- runtime·unlock(&paniclk);
- if(runtime·xadd(&runtime·panicking, -1) != 0) {
- // Some other m is panicking too.
- // Let it print what it needs to print.
- // Wait forever without chewing up cpu.
- // It will exit when it's done.
- static Mutex deadlock;
- runtime·lock(&deadlock);
- runtime·lock(&deadlock);
- }
-
- if(crash)
- runtime·crash();
-
- runtime·exit(2);
-}
-
-#pragma textflag NOSPLIT
-bool
-runtime·canpanic(G *gp)
-{
- M *m;
- uint32 status;
-
- // Note that g is m->gsignal, different from gp.
- // Note also that g->m can change at preemption, so m can go stale
- // if this function ever makes a function call.
- m = g->m;
-
- // Is it okay for gp to panic instead of crashing the program?
- // Yes, as long as it is running Go code, not runtime code,
- // and not stuck in a system call.
- if(gp == nil || gp != m->curg)
- return false;
- if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
- return false;
- status = runtime·readgstatus(gp);
- if((status&~Gscan) != Grunning || gp->syscallsp != 0)
- return false;
-#ifdef GOOS_windows
- if(m->libcallsp != 0)
- return false;
-#endif
- return true;
-}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 91b5da294..892946702 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -54,6 +54,11 @@ func throwinit() {
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
+ if getg().m.curg != getg() {
+ // go code on the system stack can't defer
+ gothrow("defer on system stack")
+ }
+
// the arguments of fn are in a perilous state. The stack map
// for deferproc does not describe them. So we can't let garbage
// collection or stack copying trigger until we've copied them out
@@ -64,20 +69,18 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
if GOARCH == "arm" || GOARCH == "power64" || GOARCH == "power64le" {
argp += ptrSize // skip caller's saved link register
}
- mp := acquirem()
- mp.scalararg[0] = uintptr(siz)
- mp.ptrarg[0] = unsafe.Pointer(fn)
- mp.scalararg[1] = argp
- mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz))
-
- if mp.curg != getg() {
- // go code on the m stack can't defer
- gothrow("defer on m")
- }
-
- onM(deferproc_m)
+ callerpc := getcallerpc(unsafe.Pointer(&siz))
- releasem(mp)
+ systemstack(func() {
+ d := newdefer(siz)
+ if d._panic != nil {
+ gothrow("deferproc: d.panic != nil after newdefer")
+ }
+ d.fn = fn
+ d.pc = callerpc
+ d.argp = argp
+ memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
+ })
// deferproc returns 0 normally.
// a deferred func that stops a panic
@@ -298,8 +301,6 @@ func Goexit() {
goexit()
}
-func canpanic(*g) bool
-
// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
if p.link != nil {
@@ -318,7 +319,10 @@ func printpanics(p *_panic) {
func gopanic(e interface{}) {
gp := getg()
if gp.m.curg != gp {
- gothrow("panic on m stack")
+ print("panic: ")
+ printany(e)
+ print("\n")
+ gothrow("panic on system stack")
}
// m.softfloat is set during software floating point.
@@ -414,7 +418,7 @@ func gopanic(e interface{}) {
// Pass information about recovering frame to recovery.
gp.sigcode0 = uintptr(argp)
gp.sigcode1 = pc
- mcall(recovery_m)
+ mcall(recovery)
gothrow("recovery failed") // mcall should not return
}
}
@@ -466,17 +470,17 @@ func gorecover(argp uintptr) interface{} {
//go:nosplit
func startpanic() {
- onM_signalok(startpanic_m)
+ systemstack(startpanic_m)
}
//go:nosplit
func dopanic(unused int) {
+ pc := getcallerpc(unsafe.Pointer(&unused))
+ sp := getcallersp(unsafe.Pointer(&unused))
gp := getg()
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(gp)
- mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused))
- mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
- onM_signalok(dopanic_m) // should never return
+ systemstack(func() {
+ dopanic_m(gp, pc, sp) // should never return
+ })
*(*int)(nil) = 0
}
diff --git a/src/runtime/panic1.go b/src/runtime/panic1.go
new file mode 100644
index 000000000..17379f963
--- /dev/null
+++ b/src/runtime/panic1.go
@@ -0,0 +1,161 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Code related to defer, panic and recover.
+// TODO: Merge into panic.go.
+
+//uint32 runtime·panicking;
+var paniclk mutex
+
+const hasLinkRegister = GOARCH == "arm" || GOARCH == "power64" || GOARCH == "power64le"
+
+// Unwind the stack after a deferred function calls recover
+// after a panic. Then arrange to continue running as though
+// the caller of the deferred function returned normally.
+func recovery(gp *g) {
+ // Info about defer passed in G struct.
+ argp := (unsafe.Pointer)(gp.sigcode0)
+ pc := uintptr(gp.sigcode1)
+
+ // d's arguments need to be in the stack.
+ if argp != nil && (uintptr(argp) < gp.stack.lo || gp.stack.hi < uintptr(argp)) {
+ print("recover: ", argp, " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
+ gothrow("bad recovery")
+ }
+
+ // Make the deferproc for this d return again,
+ // this time returning 1. The calling function will
+ // jump to the standard return epilogue.
+ // The -2*sizeof(uintptr) makes up for the
+ // two extra words that are on the stack at
+ // each call to deferproc.
+ // (The pc we're returning to does pop pop
+ // before it tests the return value.)
+ // On the arm and power there are 2 saved LRs mixed in too.
+ if hasLinkRegister {
+ gp.sched.sp = uintptr(argp) - 4*ptrSize
+ } else {
+ gp.sched.sp = uintptr(argp) - 2*ptrSize
+ }
+ gp.sched.pc = pc
+ gp.sched.lr = 0
+ gp.sched.ret = 1
+ gogo(&gp.sched)
+}
+
+func startpanic_m() {
+ _g_ := getg()
+ if mheap_.cachealloc.size == 0 { // very early
+ print("runtime: panic before malloc heap initialized\n")
+ _g_.m.mallocing = 1 // tell rest of panic not to try to malloc
+ } else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
+ _g_.m.mcache = allocmcache()
+ }
+
+ switch _g_.m.dying {
+ case 0:
+ _g_.m.dying = 1
+ if _g_ != nil {
+ _g_.writebuf = nil
+ }
+ xadd(&panicking, 1)
+ lock(&paniclk)
+ if debug.schedtrace > 0 || debug.scheddetail > 0 {
+ schedtrace(true)
+ }
+ freezetheworld()
+ return
+ case 1:
+ // Something failed while panicking, probably the print of the
+ // argument to panic(). Just print a stack trace and exit.
+ _g_.m.dying = 2
+ print("panic during panic\n")
+ dopanic(0)
+ exit(3)
+ fallthrough
+ case 2:
+ // This is a genuine bug in the runtime, we couldn't even
+ // print the stack trace successfully.
+ _g_.m.dying = 3
+ print("stack trace unavailable\n")
+ exit(4)
+ fallthrough
+ default:
+ // Can't even print! Just exit.
+ exit(5)
+ }
+}
+
+var didothers bool
+var deadlock mutex
+
+func dopanic_m(gp *g, pc, sp uintptr) {
+ if gp.sig != 0 {
+ print("[signal ", hex(gp.sig), " code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
+ }
+
+ var docrash bool
+ _g_ := getg()
+ if t := gotraceback(&docrash); t > 0 {
+ if gp != gp.m.g0 {
+ print("\n")
+ goroutineheader(gp)
+ traceback(pc, sp, 0, gp)
+ } else if t >= 2 || _g_.m.throwing > 0 {
+ print("\nruntime stack:\n")
+ traceback(pc, sp, 0, gp)
+ }
+ if !didothers {
+ didothers = true
+ tracebackothers(gp)
+ }
+ }
+ unlock(&paniclk)
+
+ if xadd(&panicking, -1) != 0 {
+ // Some other m is panicking too.
+ // Let it print what it needs to print.
+ // Wait forever without chewing up cpu.
+ // It will exit when it's done.
+ lock(&deadlock)
+ lock(&deadlock)
+ }
+
+ if docrash {
+ crash()
+ }
+
+ exit(2)
+}
+
+//go:nosplit
+func canpanic(gp *g) bool {
+ // Note that g is m->gsignal, different from gp.
+ // Note also that g->m can change at preemption, so m can go stale
+ // if this function ever makes a function call.
+ _g_ := getg()
+ _m_ := _g_.m
+
+ // Is it okay for gp to panic instead of crashing the program?
+ // Yes, as long as it is running Go code, not runtime code,
+ // and not stuck in a system call.
+ if gp == nil || gp != _m_.curg {
+ return false
+ }
+ if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.gcing != 0 || _m_.dying != 0 {
+ return false
+ }
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
+ return false
+ }
+ if GOOS == "windows" && _m_.libcallsp != 0 {
+ return false
+ }
+ return true
+}
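dopanic_m in the new panic1.go coordinates crashing threads with an atomic panicking counter, a paniclk held around the output, and the lock-the-same-mutex-twice trick to park every panicking thread except the one that finally exits. Here is a hedged, standalone analogue of that coordination built on sync and sync/atomic rather than the runtime's mutex and xadd; the crash function and its messages are invented for the example.

package main

import (
    "fmt"
    "os"
    "sync"
    "sync/atomic"
)

var (
    panicking int32
    paniclk   sync.Mutex
    deadlock  sync.Mutex
)

func crash(msg string) {
    atomic.AddInt32(&panicking, 1)
    paniclk.Lock()
    fmt.Fprintln(os.Stderr, "fatal:", msg) // print while holding the lock
    paniclk.Unlock()

    if atomic.AddInt32(&panicking, -1) != 0 {
        // Some other goroutine is crashing too; let it print and exit.
        // Locking an already-locked mutex parks this one forever.
        deadlock.Lock()
        deadlock.Lock()
    }
    os.Exit(2)
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(i int) { defer wg.Done(); crash(fmt.Sprintf("from goroutine %d", i)) }(i)
    }
    wg.Wait() // never reached: one crash call exits the process
}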
diff --git a/src/runtime/parfor.c b/src/runtime/parfor.c
deleted file mode 100644
index e44956840..000000000
--- a/src/runtime/parfor.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parallel for algorithm.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-
-struct ParForThread
-{
- // the thread's iteration space [32lsb, 32msb)
- uint64 pos;
- // stats
- uint64 nsteal;
- uint64 nstealcnt;
- uint64 nprocyield;
- uint64 nosyield;
- uint64 nsleep;
- byte pad[CacheLineSize];
-};
-
-void
-runtime·parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32))
-{
- uint32 i, begin, end;
- uint64 *pos;
-
- if(desc == nil || nthr == 0 || nthr > desc->nthrmax || body == nil) {
- runtime·printf("desc=%p nthr=%d count=%d body=%p\n", desc, nthr, n, body);
- runtime·throw("parfor: invalid args");
- }
-
- desc->body = body;
- desc->done = 0;
- desc->nthr = nthr;
- desc->thrseq = 0;
- desc->cnt = n;
- desc->ctx = ctx;
- desc->wait = wait;
- desc->nsteal = 0;
- desc->nstealcnt = 0;
- desc->nprocyield = 0;
- desc->nosyield = 0;
- desc->nsleep = 0;
- for(i=0; i<nthr; i++) {
- begin = (uint64)n*i / nthr;
- end = (uint64)n*(i+1) / nthr;
- pos = &desc->thr[i].pos;
- if(((uintptr)pos & 7) != 0)
- runtime·throw("parforsetup: pos is not aligned");
- *pos = (uint64)begin | (((uint64)end)<<32);
- }
-}
-
-void
-runtime·parfordo(ParFor *desc)
-{
- ParForThread *me;
- uint32 tid, begin, end, begin2, try, victim, i;
- uint64 *mypos, *victimpos, pos, newpos;
- void (*body)(ParFor*, uint32);
- bool idle;
-
- // Obtain 0-based thread index.
- tid = runtime·xadd(&desc->thrseq, 1) - 1;
- if(tid >= desc->nthr) {
- runtime·printf("tid=%d nthr=%d\n", tid, desc->nthr);
- runtime·throw("parfor: invalid tid");
- }
-
- // If single-threaded, just execute the for serially.
- if(desc->nthr==1) {
- for(i=0; i<desc->cnt; i++)
- desc->body(desc, i);
- return;
- }
-
- body = desc->body;
- me = &desc->thr[tid];
- mypos = &me->pos;
- for(;;) {
- for(;;) {
- // While there is local work,
- // bump low index and execute the iteration.
- pos = runtime·xadd64(mypos, 1);
- begin = (uint32)pos-1;
- end = (uint32)(pos>>32);
- if(begin < end) {
- body(desc, begin);
- continue;
- }
- break;
- }
-
- // Out of work, need to steal something.
- idle = false;
- for(try=0;; try++) {
- // If we don't see any work for long enough,
- // increment the done counter...
- if(try > desc->nthr*4 && !idle) {
- idle = true;
- runtime·xadd(&desc->done, 1);
- }
- // ...if all threads have incremented the counter,
- // we are done.
- if(desc->done + !idle == desc->nthr) {
- if(!idle)
- runtime·xadd(&desc->done, 1);
- goto exit;
- }
- // Choose a random victim for stealing.
- victim = runtime·fastrand1() % (desc->nthr-1);
- if(victim >= tid)
- victim++;
- victimpos = &desc->thr[victim].pos;
- for(;;) {
- // See if it has any work.
- pos = runtime·atomicload64(victimpos);
- begin = (uint32)pos;
- end = (uint32)(pos>>32);
- if(begin+1 >= end) {
- begin = end = 0;
- break;
- }
- if(idle) {
- runtime·xadd(&desc->done, -1);
- idle = false;
- }
- begin2 = begin + (end-begin)/2;
- newpos = (uint64)begin | (uint64)begin2<<32;
- if(runtime·cas64(victimpos, pos, newpos)) {
- begin = begin2;
- break;
- }
- }
- if(begin < end) {
- // Has successfully stolen some work.
- if(idle)
- runtime·throw("parfor: should not be idle");
- runtime·atomicstore64(mypos, (uint64)begin | (uint64)end<<32);
- me->nsteal++;
- me->nstealcnt += end-begin;
- break;
- }
- // Backoff.
- if(try < desc->nthr) {
- // nothing
- } else if (try < 4*desc->nthr) {
- me->nprocyield++;
- runtime·procyield(20);
- // If a caller asked not to wait for the others, exit now
- // (assume that most work is already done at this point).
- } else if (!desc->wait) {
- if(!idle)
- runtime·xadd(&desc->done, 1);
- goto exit;
- } else if (try < 6*desc->nthr) {
- me->nosyield++;
- runtime·osyield();
- } else {
- me->nsleep++;
- runtime·usleep(1);
- }
- }
- }
-exit:
- runtime·xadd64(&desc->nsteal, me->nsteal);
- runtime·xadd64(&desc->nstealcnt, me->nstealcnt);
- runtime·xadd64(&desc->nprocyield, me->nprocyield);
- runtime·xadd64(&desc->nosyield, me->nosyield);
- runtime·xadd64(&desc->nsleep, me->nsleep);
- me->nsteal = 0;
- me->nstealcnt = 0;
- me->nprocyield = 0;
- me->nosyield = 0;
- me->nsleep = 0;
-}
-
-// For testing from Go.
-void
-runtime·newparfor_m(void)
-{
- g->m->ptrarg[0] = runtime·parforalloc(g->m->scalararg[0]);
-}
-
-void
-runtime·parforsetup_m(void)
-{
- ParFor *desc;
- void *ctx;
- void (*body)(ParFor*, uint32);
-
- desc = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- ctx = g->m->ptrarg[1];
- g->m->ptrarg[1] = nil;
- body = g->m->ptrarg[2];
- g->m->ptrarg[2] = nil;
-
- runtime·parforsetup(desc, g->m->scalararg[0], g->m->scalararg[1], ctx, g->m->scalararg[2], body);
-}
-
-void
-runtime·parfordo_m(void)
-{
- ParFor *desc;
-
- desc = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- runtime·parfordo(desc);
-}
-
-void
-runtime·parforiters_m(void)
-{
- ParFor *desc;
- uintptr tid;
-
- desc = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- tid = g->m->scalararg[0];
- g->m->scalararg[0] = desc->thr[tid].pos;
- g->m->scalararg[1] = desc->thr[tid].pos>>32;
-}
diff --git a/src/runtime/parfor.go b/src/runtime/parfor.go
new file mode 100644
index 000000000..14870c9fe
--- /dev/null
+++ b/src/runtime/parfor.go
@@ -0,0 +1,186 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parallel for algorithm.
+
+package runtime
+
+import "unsafe"
+
+type parforthread struct {
+ // the thread's iteration space [32lsb, 32msb)
+ pos uint64
+ // stats
+ nsteal uint64
+ nstealcnt uint64
+ nprocyield uint64
+ nosyield uint64
+ nsleep uint64
+ pad [_CacheLineSize]byte
+}
+
+func desc_thr_index(desc *parfor, i uint32) *parforthread {
+ return (*parforthread)(add(unsafe.Pointer(desc.thr), uintptr(i)*unsafe.Sizeof(*desc.thr)))
+}
+
+func parforsetup(desc *parfor, nthr, n uint32, ctx unsafe.Pointer, wait bool, body func(*parfor, uint32)) {
+ if desc == nil || nthr == 0 || nthr > desc.nthrmax || body == nil {
+ print("desc=", desc, " nthr=", nthr, " count=", n, " body=", body, "\n")
+ gothrow("parfor: invalid args")
+ }
+
+ desc.body = *(*unsafe.Pointer)(unsafe.Pointer(&body))
+ desc.done = 0
+ desc.nthr = nthr
+ desc.thrseq = 0
+ desc.cnt = n
+ desc.ctx = ctx
+ desc.wait = wait
+ desc.nsteal = 0
+ desc.nstealcnt = 0
+ desc.nprocyield = 0
+ desc.nosyield = 0
+ desc.nsleep = 0
+
+ for i := uint32(0); i < nthr; i++ {
+ begin := uint32(uint64(n) * uint64(i) / uint64(nthr))
+ end := uint32(uint64(n) * uint64(i+1) / uint64(nthr))
+ pos := &desc_thr_index(desc, i).pos
+ if uintptr(unsafe.Pointer(pos))&7 != 0 {
+ gothrow("parforsetup: pos is not aligned")
+ }
+ *pos = uint64(begin) | uint64(end)<<32
+ }
+}
+
+func parfordo(desc *parfor) {
+ // Obtain 0-based thread index.
+ tid := xadd(&desc.thrseq, 1) - 1
+ if tid >= desc.nthr {
+ print("tid=", tid, " nthr=", desc.nthr, "\n")
+ gothrow("parfor: invalid tid")
+ }
+
+ // If single-threaded, just execute the for serially.
+ body := *(*func(*parfor, uint32))(unsafe.Pointer(&desc.body))
+ if desc.nthr == 1 {
+ for i := uint32(0); i < desc.cnt; i++ {
+ body(desc, i)
+ }
+ return
+ }
+
+ me := desc_thr_index(desc, tid)
+ mypos := &me.pos
+ for {
+ for {
+ // While there is local work,
+ // bump low index and execute the iteration.
+ pos := xadd64(mypos, 1)
+ begin := uint32(pos) - 1
+ end := uint32(pos >> 32)
+ if begin < end {
+ body(desc, begin)
+ continue
+ }
+ break
+ }
+
+ // Out of work, need to steal something.
+ idle := false
+ for try := uint32(0); ; try++ {
+ // If we don't see any work for long enough,
+ // increment the done counter...
+ if try > desc.nthr*4 && !idle {
+ idle = true
+ xadd(&desc.done, 1)
+ }
+
+ // ...if all threads have incremented the counter,
+ // we are done.
+ extra := uint32(0)
+ if !idle {
+ extra = 1
+ }
+ if desc.done+extra == desc.nthr {
+ if !idle {
+ xadd(&desc.done, 1)
+ }
+ goto exit
+ }
+
+ // Choose a random victim for stealing.
+ var begin, end uint32
+ victim := fastrand1() % (desc.nthr - 1)
+ if victim >= tid {
+ victim++
+ }
+ victimpos := &desc_thr_index(desc, victim).pos
+ for {
+ // See if it has any work.
+ pos := atomicload64(victimpos)
+ begin = uint32(pos)
+ end = uint32(pos >> 32)
+ if begin+1 >= end {
+ end = 0
+ begin = end
+ break
+ }
+ if idle {
+ xadd(&desc.done, -1)
+ idle = false
+ }
+ begin2 := begin + (end-begin)/2
+ newpos := uint64(begin) | uint64(begin2)<<32
+ if cas64(victimpos, pos, newpos) {
+ begin = begin2
+ break
+ }
+ }
+ if begin < end {
+ // Has successfully stolen some work.
+ if idle {
+ gothrow("parfor: should not be idle")
+ }
+ atomicstore64(mypos, uint64(begin)|uint64(end)<<32)
+ me.nsteal++
+ me.nstealcnt += uint64(end) - uint64(begin)
+ break
+ }
+
+ // Backoff.
+ if try < desc.nthr {
+ // nothing
+ } else if try < 4*desc.nthr {
+ me.nprocyield++
+ procyield(20)
+ } else if !desc.wait {
+ // If a caller asked not to wait for the others, exit now
+ // (assume that most work is already done at this point).
+ if !idle {
+ xadd(&desc.done, 1)
+ }
+ goto exit
+ } else if try < 6*desc.nthr {
+ me.nosyield++
+ osyield()
+ } else {
+ me.nsleep++
+ usleep(1)
+ }
+ }
+ }
+
+exit:
+ xadd64(&desc.nsteal, int64(me.nsteal))
+ xadd64(&desc.nstealcnt, int64(me.nstealcnt))
+ xadd64(&desc.nprocyield, int64(me.nprocyield))
+ xadd64(&desc.nosyield, int64(me.nosyield))
+ xadd64(&desc.nsleep, int64(me.nsleep))
+ me.nsteal = 0
+ me.nstealcnt = 0
+ me.nprocyield = 0
+ me.nosyield = 0
+ me.nsleep = 0
+}
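The new parfor.go keeps the C algorithm's core trick: each worker's iteration space lives in one uint64, begin in the low 32 bits and end in the high 32 bits, so claiming the next index is a single atomic add and stealing is a single CAS that halves the victim's range. The sketch below is a simplified two-worker version of that packing and stealing built on sync/atomic; it is not the runtime's parfor API, and its termination handling is far cruder than parfordo's backoff loop.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func pack(begin, end uint32) uint64 { return uint64(begin) | uint64(end)<<32 }

func unpack(pos uint64) (begin, end uint32) { return uint32(pos), uint32(pos >> 32) }

// next claims the next local index, or reports that the local range is empty.
func next(pos *uint64) (uint32, bool) {
    p := atomic.AddUint64(pos, 1)
    begin, end := uint32(p)-1, uint32(p>>32)
    return begin, begin < end
}

// steal moves the upper half of the victim's remaining range to the thief.
func steal(victim, thief *uint64) bool {
    for {
        old := atomic.LoadUint64(victim)
        begin, end := unpack(old)
        if begin+1 >= end {
            return false // nothing worth stealing
        }
        mid := begin + (end-begin)/2
        if atomic.CompareAndSwapUint64(victim, old, pack(begin, mid)) {
            atomic.StoreUint64(thief, pack(mid, end))
            return true
        }
    }
}

func main() {
    const n = 1000
    pos := []uint64{pack(0, n), pack(n, n)} // worker 1 starts with nothing
    var total uint64
    var wg sync.WaitGroup
    for w := 0; w < 2; w++ {
        wg.Add(1)
        go func(w int) {
            defer wg.Done()
            for {
                if i, ok := next(&pos[w]); ok {
                    atomic.AddUint64(&total, uint64(i)) // the "body" of the parallel for
                    continue
                }
                if !steal(&pos[1-w], &pos[w]) {
                    return
                }
            }
        }(w)
    }
    wg.Wait()
    fmt.Println(total == n*(n-1)/2) // true: every index was executed exactly once
}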
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 8677cb30c..101c05989 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -249,7 +249,7 @@ func TestGoroutineSwitch(t *testing.T) {
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
f := runtime.FuncForPC(stk[1])
- if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode" || f.Name() == "GC") {
+ if f != nil && (f.Name() == "runtime._System" || f.Name() == "runtime._ExternalCode" || f.Name() == "runtime._GC") {
return
}
}
diff --git a/src/runtime/proc.c b/src/runtime/proc.c
deleted file mode 100644
index ce39db4ab..000000000
--- a/src/runtime/proc.c
+++ /dev/null
@@ -1,3497 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "zaexperiment.h"
-#include "malloc.h"
-#include "stack.h"
-#include "race.h"
-#include "type.h"
-#include "mgc0.h"
-#include "textflag.h"
-
-// Goroutine scheduler
-// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
-//
-// The main concepts are:
-// G - goroutine.
-// M - worker thread, or machine.
-// P - processor, a resource that is required to execute Go code.
-// M must have an associated P to execute Go code, however it can be
-// blocked or in a syscall w/o an associated P.
-//
-// Design doc at http://golang.org/s/go11sched.
-
-enum
-{
- // Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
- // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
- GoidCacheBatch = 16,
-};
-
-SchedT runtime·sched;
-int32 runtime·gomaxprocs;
-uint32 runtime·needextram;
-bool runtime·iscgo;
-M runtime·m0;
-G runtime·g0; // idle goroutine for m0
-G* runtime·lastg;
-M* runtime·allm;
-M* runtime·extram;
-P* runtime·allp[MaxGomaxprocs+1];
-int8* runtime·goos;
-int32 runtime·ncpu;
-int32 runtime·newprocs;
-
-Mutex runtime·allglock; // the following vars are protected by this lock or by stoptheworld
-G** runtime·allg;
-Slice runtime·allgs;
-uintptr runtime·allglen;
-ForceGCState runtime·forcegc;
-
-void runtime·mstart(void);
-static void runqput(P*, G*);
-static G* runqget(P*);
-static bool runqputslow(P*, G*, uint32, uint32);
-static G* runqsteal(P*, P*);
-static void mput(M*);
-static M* mget(void);
-static void mcommoninit(M*);
-static void schedule(void);
-static void procresize(int32);
-static void acquirep(P*);
-static P* releasep(void);
-static void newm(void(*)(void), P*);
-static void stopm(void);
-static void startm(P*, bool);
-static void handoffp(P*);
-static void wakep(void);
-static void stoplockedm(void);
-static void startlockedm(G*);
-static void sysmon(void);
-static uint32 retake(int64);
-static void incidlelocked(int32);
-static void checkdead(void);
-static void exitsyscall0(G*);
-void runtime·park_m(G*);
-static void goexit0(G*);
-static void gfput(P*, G*);
-static G* gfget(P*);
-static void gfpurge(P*);
-static void globrunqput(G*);
-static void globrunqputbatch(G*, G*, int32);
-static G* globrunqget(P*, int32);
-static P* pidleget(void);
-static void pidleput(P*);
-static void injectglist(G*);
-static bool preemptall(void);
-static bool preemptone(P*);
-static bool exitsyscallfast(void);
-static bool haveexperiment(int8*);
-void runtime·allgadd(G*);
-static void dropg(void);
-
-extern String runtime·buildVersion;
-
-// For cgo-using programs with external linking,
-// export "main" (defined in assembly) so that libc can handle basic
-// C runtime startup and call the Go program as if it were
-// the C main function.
-#pragma cgo_export_static main
-
-// Filled in by dynamic linker when Cgo is available.
-void (*_cgo_init)(void);
-void (*_cgo_malloc)(void);
-void (*_cgo_free)(void);
-
-// Copy for Go code.
-void* runtime·cgoMalloc;
-void* runtime·cgoFree;
-
-// The bootstrap sequence is:
-//
-// call osinit
-// call schedinit
-// make & queue new G
-// call runtime·mstart
-//
-// The new G calls runtime·main.
-void
-runtime·schedinit(void)
-{
- int32 n, procs;
- byte *p;
-
- // raceinit must be the first call to race detector.
- // In particular, it must be done before mallocinit below calls racemapshadow.
- if(raceenabled)
- g->racectx = runtime·raceinit();
-
- runtime·sched.maxmcount = 10000;
-
- runtime·tracebackinit();
- runtime·symtabinit();
- runtime·stackinit();
- runtime·mallocinit();
- mcommoninit(g->m);
-
- runtime·goargs();
- runtime·goenvs();
- runtime·parsedebugvars();
- runtime·gcinit();
-
- runtime·sched.lastpoll = runtime·nanotime();
- procs = 1;
- p = runtime·getenv("GOMAXPROCS");
- if(p != nil && (n = runtime·atoi(p)) > 0) {
- if(n > MaxGomaxprocs)
- n = MaxGomaxprocs;
- procs = n;
- }
- procresize(procs);
-
- if(runtime·buildVersion.str == nil) {
- // Condition should never trigger. This code just serves
- // to ensure runtime·buildVersion is kept in the resulting binary.
- runtime·buildVersion.str = (uint8*)"unknown";
- runtime·buildVersion.len = 7;
- }
-
- runtime·cgoMalloc = _cgo_malloc;
- runtime·cgoFree = _cgo_free;
-}
-
-void
-runtime·newsysmon(void)
-{
- newm(sysmon, nil);
-}
-
-static void
-dumpgstatus(G* gp)
-{
- runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
- runtime·printf("runtime: g: g=%p, goid=%D, g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
-}
-
-static void
-checkmcount(void)
-{
- // sched lock is held
- if(runtime·sched.mcount > runtime·sched.maxmcount){
- runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
- runtime·throw("thread exhaustion");
- }
-}
-
-static void
-mcommoninit(M *mp)
-{
- // g0 stack won't make sense for user (and is not necessarily unwindable).
- if(g != g->m->g0)
- runtime·callers(1, mp->createstack, nelem(mp->createstack));
-
- mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
-
- runtime·lock(&runtime·sched.lock);
- mp->id = runtime·sched.mcount++;
- checkmcount();
- runtime·mpreinit(mp);
- if(mp->gsignal)
- mp->gsignal->stackguard1 = mp->gsignal->stack.lo + StackGuard;
-
- // Add to runtime·allm so garbage collector doesn't free g->m
- // when it is just in a register or thread-local storage.
- mp->alllink = runtime·allm;
- // runtime·NumCgoCall() iterates over allm w/o schedlock,
- // so we need to publish it safely.
- runtime·atomicstorep(&runtime·allm, mp);
- runtime·unlock(&runtime·sched.lock);
-}
-
-// Mark gp ready to run.
-void
-runtime·ready(G *gp)
-{
- uint32 status;
-
- status = runtime·readgstatus(gp);
- // Mark runnable.
- g->m->locks++; // disable preemption because it can be holding p in a local var
- if((status&~Gscan) != Gwaiting){
- dumpgstatus(gp);
- runtime·throw("bad g->status in ready");
- }
- // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
- runtime·casgstatus(gp, Gwaiting, Grunnable);
- runqput(g->m->p, gp);
- if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0) // TODO: fast atomic
- wakep();
- g->m->locks--;
- if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
- g->stackguard0 = StackPreempt;
-}
-
-void
-runtime·ready_m(void)
-{
- G *gp;
-
- gp = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- runtime·ready(gp);
-}
-
-int32
-runtime·gcprocs(void)
-{
- int32 n;
-
- // Figure out how many CPUs to use during GC.
- // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
- runtime·lock(&runtime·sched.lock);
- n = runtime·gomaxprocs;
- if(n > runtime·ncpu)
- n = runtime·ncpu;
- if(n > MaxGcproc)
- n = MaxGcproc;
- if(n > runtime·sched.nmidle+1) // one M is currently running
- n = runtime·sched.nmidle+1;
- runtime·unlock(&runtime·sched.lock);
- return n;
-}
-
-static bool
-needaddgcproc(void)
-{
- int32 n;
-
- runtime·lock(&runtime·sched.lock);
- n = runtime·gomaxprocs;
- if(n > runtime·ncpu)
- n = runtime·ncpu;
- if(n > MaxGcproc)
- n = MaxGcproc;
- n -= runtime·sched.nmidle+1; // one M is currently running
- runtime·unlock(&runtime·sched.lock);
- return n > 0;
-}
-
-void
-runtime·helpgc(int32 nproc)
-{
- M *mp;
- int32 n, pos;
-
- runtime·lock(&runtime·sched.lock);
- pos = 0;
- for(n = 1; n < nproc; n++) { // one M is currently running
- if(runtime·allp[pos]->mcache == g->m->mcache)
- pos++;
- mp = mget();
- if(mp == nil)
- runtime·throw("runtime·gcprocs inconsistency");
- mp->helpgc = n;
- mp->mcache = runtime·allp[pos]->mcache;
- pos++;
- runtime·notewakeup(&mp->park);
- }
- runtime·unlock(&runtime·sched.lock);
-}
-
-// Similar to stoptheworld but best-effort and can be called several times.
-// There is no reverse operation, used during crashing.
-// This function must not lock any mutexes.
-void
-runtime·freezetheworld(void)
-{
- int32 i;
-
- if(runtime·gomaxprocs == 1)
- return;
- // stopwait and preemption requests can be lost
- // due to races with concurrently executing threads,
- // so try several times
- for(i = 0; i < 5; i++) {
- // this should tell the scheduler to not start any new goroutines
- runtime·sched.stopwait = 0x7fffffff;
- runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
- // this should stop running goroutines
- if(!preemptall())
- break; // no running goroutines
- runtime·usleep(1000);
- }
- // to be sure
- runtime·usleep(1000);
- preemptall();
- runtime·usleep(1000);
-}
-
-static bool
-isscanstatus(uint32 status)
-{
- if(status == Gscan)
- runtime·throw("isscanstatus: Bad status Gscan");
- return (status&Gscan) == Gscan;
-}
-
-// All reads and writes of g's status go through readgstatus, casgstatus
-// castogscanstatus, casfromgscanstatus.
-#pragma textflag NOSPLIT
-uint32
-runtime·readgstatus(G *gp)
-{
- return runtime·atomicload(&gp->atomicstatus);
-}
-
-// The Gscanstatuses are acting like locks and this releases them.
-// If it proves to be a performance hit we should be able to make these
-// simple atomic stores but for now we are going to throw if
-// we see an inconsistent state.
-void
-runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
-{
- bool success = false;
-
- // Check that transition is valid.
- switch(oldval) {
- case Gscanrunnable:
- case Gscanwaiting:
- case Gscanrunning:
- case Gscansyscall:
- if(newval == (oldval&~Gscan))
- success = runtime·cas(&gp->atomicstatus, oldval, newval);
- break;
- case Gscanenqueue:
- if(newval == Gwaiting)
- success = runtime·cas(&gp->atomicstatus, oldval, newval);
- break;
- }
- if(!success){
- runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",
- gp, oldval, newval);
- dumpgstatus(gp);
- runtime·throw("casfromgscanstatus: gp->status is not in scan state");
- }
-}
-
-// This will return false if the gp is not in the expected status and the cas fails.
-// This acts like a lock acquire while the casfromgstatus acts like a lock release.
-bool
-runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
-{
- switch(oldval) {
- case Grunnable:
- case Gwaiting:
- case Gsyscall:
- if(newval == (oldval|Gscan))
- return runtime·cas(&gp->atomicstatus, oldval, newval);
- break;
- case Grunning:
- if(newval == Gscanrunning || newval == Gscanenqueue)
- return runtime·cas(&gp->atomicstatus, oldval, newval);
- break;
- }
-
- runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
- runtime·throw("castogscanstatus");
- return false; // not reached
-}
-
-static void badcasgstatus(void);
-static void helpcasgstatus(void);
-
-// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
-// and casfromgscanstatus instead.
-// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
-// put it in the Gscan state is finished.
-#pragma textflag NOSPLIT
-void
-runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
-{
- void (*fn)(void);
-
- if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
- g->m->scalararg[0] = oldval;
- g->m->scalararg[1] = newval;
- fn = badcasgstatus;
- runtime·onM(&fn);
- }
-
- // loop if gp->atomicstatus is in a scan state giving
- // GC time to finish and change the state to oldval.
- while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
-
- }
-}
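The casgstatus/castogscanstatus routines above treat the Gscan bit as a lock: a scanner CASes the bit in, ordinary status transitions spin until the bit is gone, and every change goes through an atomic compare-and-swap. A minimal standalone sketch of that idea follows, with invented status constants and a single status word; the explicit Gosched in the retry loop is only there so the sketch runs everywhere, whereas the real loop simply spins.

package main

import (
    "fmt"
    "runtime"
    "sync/atomic"
    "time"
)

const (
    _Grunnable uint32 = 1
    _Grunning  uint32 = 2
    _Gscan     uint32 = 0x1000 // "locked for scanning" bit (value invented for the sketch)
)

// casgstatus-style transition: loop until the scan bit has been dropped and
// the expected old value can be swapped for the new one.
func casgstatus(status *uint32, old, new uint32) {
    for !atomic.CompareAndSwapUint32(status, old, new) {
        runtime.Gosched() // yield instead of pure spinning, for portability
    }
}

func main() {
    status := _Grunnable

    // Scanner: claim the goroutine by setting the scan bit, pretend to scan
    // its stack, then release by restoring the plain status.
    go func() {
        if atomic.CompareAndSwapUint32(&status, _Grunnable, _Grunnable|_Gscan) {
            time.Sleep(10 * time.Millisecond)
            atomic.StoreUint32(&status, _Grunnable)
        }
    }()

    time.Sleep(time.Millisecond)               // give the scanner a chance to win the race
    casgstatus(&status, _Grunnable, _Grunning) // waits while the scan bit is held
    fmt.Println(atomic.LoadUint32(&status) == _Grunning)
}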
-
-static void
-badcasgstatus(void)
-{
- uint32 oldval, newval;
-
- oldval = g->m->scalararg[0];
- newval = g->m->scalararg[1];
- g->m->scalararg[0] = 0;
- g->m->scalararg[1] = 0;
-
- runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
- runtime·throw("casgstatus: bad incoming values");
-}
-
-static void
-helpcasgstatus(void)
-{
- G *gp;
-
- gp = g->m->ptrarg[0];
- g->m->ptrarg[0] = 0;
- runtime·gcphasework(gp);
-}
-
-// stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
-// or in the context of a moving collector the pointers can be flipped from pointing
-// to old object to pointing to new objects.
-// If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
-// the caller calls restartg.
-// If stopg returns false, the caller is not responsible for calling restartg. This can happen
-// if another thread, either the gp itself or another GC thread is taking the responsibility
-// to do the GC work related to this thread.
-bool
-runtime·stopg(G *gp)
-{
- uint32 s;
-
- for(;;) {
- if(gp->gcworkdone)
- return false;
-
- s = runtime·readgstatus(gp);
- switch(s) {
- default:
- dumpgstatus(gp);
- runtime·throw("stopg: gp->atomicstatus is not valid");
-
- case Gdead:
- return false;
-
- case Gcopystack:
- // Loop until a new stack is in place.
- break;
-
- case Grunnable:
- case Gsyscall:
- case Gwaiting:
- // Claim goroutine by setting scan bit.
- if(!runtime·castogscanstatus(gp, s, s|Gscan))
- break;
- // In scan state, do work.
- runtime·gcphasework(gp);
- return true;
-
- case Gscanrunnable:
- case Gscanwaiting:
- case Gscansyscall:
- // Goroutine already claimed by another GC helper.
- return false;
-
- case Grunning:
- if(runtime·gcphase == GCscan) {
- gp->gcworkdone = true;
- return false;
- // Running routines not scanned during
- // GCscan phase, we only scan non-running routines.
- }
-
- // Claim goroutine, so we aren't racing with a status
- // transition away from Grunning.
- if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
- break;
-
- // Mark gp for preemption.
- if(!gp->gcworkdone) {
- gp->preemptscan = true;
- gp->preempt = true;
- gp->stackguard0 = StackPreempt;
- }
-
- // Unclaim.
- runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
- return false;
- }
- }
- // Should not be here....
-}
-
-// The GC requests that this routine be moved from a scanmumble state to a mumble state.
-void
-runtime·restartg (G *gp)
-{
- uint32 s;
-
- s = runtime·readgstatus(gp);
- switch(s) {
- default:
- dumpgstatus(gp);
- runtime·throw("restartg: unexpected status");
-
- case Gdead:
- break;
-
- case Gscanrunnable:
- case Gscanwaiting:
- case Gscansyscall:
- runtime·casfromgscanstatus(gp, s, s&~Gscan);
- break;
-
- case Gscanenqueue:
- // Scan is now completed.
- // Goroutine now needs to be made runnable.
- // We put it on the global run queue; ready blocks on the global scheduler lock.
- runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
- if(gp != g->m->curg)
- runtime·throw("processing Gscanenqueue on wrong m");
- dropg();
- runtime·ready(gp);
- break;
- }
-}
-
-static void
-stopscanstart(G* gp)
-{
- if(g == gp)
- runtime·throw("GC not moved to G0");
- if(runtime·stopg(gp)) {
- if(!isscanstatus(runtime·readgstatus(gp))) {
- dumpgstatus(gp);
- runtime·throw("GC not in scan state");
- }
- runtime·restartg(gp);
- }
-}
-
-// Runs on g0 and does the actual work after putting the g back on the run queue.
-static void
-mquiesce(G *gpmaster)
-{
- G* gp;
- uint32 i;
- uint32 status;
- uint32 activeglen;
-
- // enqueue the calling goroutine.
- runtime·restartg(gpmaster);
-
- activeglen = runtime·allglen;
- for(i = 0; i < activeglen; i++) {
- gp = runtime·allg[i];
- if(runtime·readgstatus(gp) == Gdead)
- gp->gcworkdone = true; // noop scan.
- else
- gp->gcworkdone = false;
- stopscanstart(gp);
- }
-
- // Check that the G's gcwork (such as scanning) has been done. If not do it now.
- // You can end up doing work here if the page trap on a Grunning Goroutine has
- // not been sprung or in some race situations. For example a runnable goes dead
- // and is started up again with a gp->gcworkdone set to false.
- for(i = 0; i < activeglen; i++) {
- gp = runtime·allg[i];
- while (!gp->gcworkdone) {
- status = runtime·readgstatus(gp);
- if(status == Gdead) {
- gp->gcworkdone = true; // scan is a noop
- break;
- //do nothing, scan not needed.
- }
- if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg
- runtime·noteclear(&runtime·sched.stopnote);
- else
- stopscanstart(gp);
- }
- }
-
- for(i = 0; i < activeglen; i++) {
- gp = runtime·allg[i];
- status = runtime·readgstatus(gp);
- if(isscanstatus(status)) {
- runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
- dumpgstatus(gp);
- }
- if(!gp->gcworkdone && status != Gdead) {
- runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
- dumpgstatus(gp);
- }
- }
-
- schedule(); // Never returns.
-}
-
-// quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
-// If the global runtime·gcphase is GCmark, quiesce will ensure that all of the goroutines' stacks
-// have been scanned before it returns.
-void
-runtime·quiesce(G* mastergp)
-{
- void (*fn)(G*);
-
- runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
- // Now move this to the g0 (aka m) stack.
- // g0 will potentially scan this thread and put mastergp on the runqueue
- fn = mquiesce;
- runtime·mcall(&fn);
-}
-
-// This is used by the GC as well as the routines that do stack dumps. In the case
-// of GC all the routines can be reliably stopped. This is not always the case
-// when the system is in panic or being exited.
-void
-runtime·stoptheworld(void)
-{
- int32 i;
- uint32 s;
- P *p;
- bool wait;
-
- // If we hold a lock, then we won't be able to stop another M
- // that is blocked trying to acquire the lock.
- if(g->m->locks > 0)
- runtime·throw("stoptheworld: holding locks");
-
- runtime·lock(&runtime·sched.lock);
- runtime·sched.stopwait = runtime·gomaxprocs;
- runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
- preemptall();
- // stop current P
- g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
- runtime·sched.stopwait--;
- // try to retake all P's in Psyscall status
- for(i = 0; i < runtime·gomaxprocs; i++) {
- p = runtime·allp[i];
- s = p->status;
- if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
- runtime·sched.stopwait--;
- }
- // stop idle P's
- while(p = pidleget()) {
- p->status = Pgcstop;
- runtime·sched.stopwait--;
- }
- wait = runtime·sched.stopwait > 0;
- runtime·unlock(&runtime·sched.lock);
-
- // wait for remaining P's to stop voluntarily
- if(wait) {
- for(;;) {
- // wait for 100us, then try to re-preempt in case of any races
- if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
- runtime·noteclear(&runtime·sched.stopnote);
- break;
- }
- preemptall();
- }
- }
- if(runtime·sched.stopwait)
- runtime·throw("stoptheworld: not stopped");
- for(i = 0; i < runtime·gomaxprocs; i++) {
- p = runtime·allp[i];
- if(p->status != Pgcstop)
- runtime·throw("stoptheworld: not stopped");
- }
-}
-
-static void
-mhelpgc(void)
-{
- g->m->helpgc = -1;
-}
-
-void
-runtime·starttheworld(void)
-{
- P *p, *p1;
- M *mp;
- G *gp;
- bool add;
-
- g->m->locks++; // disable preemption because it can be holding p in a local var
- gp = runtime·netpoll(false); // non-blocking
- injectglist(gp);
- add = needaddgcproc();
- runtime·lock(&runtime·sched.lock);
- if(runtime·newprocs) {
- procresize(runtime·newprocs);
- runtime·newprocs = 0;
- } else
- procresize(runtime·gomaxprocs);
- runtime·sched.gcwaiting = 0;
-
- p1 = nil;
- while(p = pidleget()) {
- // procresize() puts p's with work at the beginning of the list.
- // Once we reach a p without a run queue, the rest don't have one either.
- if(p->runqhead == p->runqtail) {
- pidleput(p);
- break;
- }
- p->m = mget();
- p->link = p1;
- p1 = p;
- }
- if(runtime·sched.sysmonwait) {
- runtime·sched.sysmonwait = false;
- runtime·notewakeup(&runtime·sched.sysmonnote);
- }
- runtime·unlock(&runtime·sched.lock);
-
- while(p1) {
- p = p1;
- p1 = p1->link;
- if(p->m) {
- mp = p->m;
- p->m = nil;
- if(mp->nextp)
- runtime·throw("starttheworld: inconsistent mp->nextp");
- mp->nextp = p;
- runtime·notewakeup(&mp->park);
- } else {
- // Start M to run P. Do not start another M below.
- newm(nil, p);
- add = false;
- }
- }
-
- if(add) {
- // If GC could have used another helper proc, start one now,
- // in the hope that it will be available next time.
- // It would have been even better to start it before the collection,
- // but doing so requires allocating memory, so it's tricky to
- // coordinate. This lazy approach works out in practice:
- // we don't mind if the first couple gc rounds don't have quite
- // the maximum number of procs.
- newm(mhelpgc, nil);
- }
- g->m->locks--;
- if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
- g->stackguard0 = StackPreempt;
-}
-
-static void mstart(void);
-
-// Called to start an M.
-#pragma textflag NOSPLIT
-void
-runtime·mstart(void)
-{
- uintptr x, size;
-
- if(g->stack.lo == 0) {
- // Initialize stack bounds from system stack.
- // Cgo may have left stack size in stack.hi.
- size = g->stack.hi;
- if(size == 0)
- size = 8192;
- g->stack.hi = (uintptr)&x;
- g->stack.lo = g->stack.hi - size + 1024;
- }
-
- // Initialize stack guards so that we can start calling
- // both Go and C functions with stack growth prologues.
- g->stackguard0 = g->stack.lo + StackGuard;
- g->stackguard1 = g->stackguard0;
- mstart();
-}
-
-static void
-mstart(void)
-{
- if(g != g->m->g0)
- runtime·throw("bad runtime·mstart");
-
- // Record top of stack for use by mcall.
- // Once we call schedule we're never coming back,
- // so other calls can reuse this stack space.
- runtime·gosave(&g->m->g0->sched);
- g->m->g0->sched.pc = (uintptr)-1; // make sure it is never used
- runtime·asminit();
- runtime·minit();
-
- // Install signal handlers; after minit so that minit can
- // prepare the thread to be able to handle the signals.
- if(g->m == &runtime·m0)
- runtime·initsig();
-
- if(g->m->mstartfn)
- g->m->mstartfn();
-
- if(g->m->helpgc) {
- g->m->helpgc = 0;
- stopm();
- } else if(g->m != &runtime·m0) {
- acquirep(g->m->nextp);
- g->m->nextp = nil;
- }
- schedule();
-
- // TODO(brainman): This point is never reached, because scheduler
- // does not release os threads at the moment. But once this path
- // is enabled, we must remove our seh here.
-}
-
-// When running with cgo, we call _cgo_thread_start
-// to start threads for us so that we can play nicely with
-// foreign code.
-void (*_cgo_thread_start)(void*);
-
-typedef struct CgoThreadStart CgoThreadStart;
-struct CgoThreadStart
-{
- G *g;
- uintptr *tls;
- void (*fn)(void);
-};
-
-M *runtime·newM(void); // in proc.go
-
-// Allocate a new m unassociated with any thread.
-// Can use p for allocation context if needed.
-M*
-runtime·allocm(P *p)
-{
- M *mp;
-
- g->m->locks++; // disable GC because it can be called from sysmon
- if(g->m->p == nil)
- acquirep(p); // temporarily borrow p for mallocs in this function
- mp = runtime·newM();
- mcommoninit(mp);
-
- // In case of cgo or Solaris, pthread_create will make us a stack.
- // Windows and Plan 9 will layout sched stack on OS stack.
- if(runtime·iscgo || Solaris || Windows || Plan9)
- mp->g0 = runtime·malg(-1);
- else
- mp->g0 = runtime·malg(8192);
- runtime·writebarrierptr_nostore(&mp->g0, mp->g0);
- mp->g0->m = mp;
- runtime·writebarrierptr_nostore(&mp->g0->m, mp->g0->m);
-
- if(p == g->m->p)
- releasep();
- g->m->locks--;
- if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
- g->stackguard0 = StackPreempt;
-
- return mp;
-}
-
-G *runtime·newG(void); // in proc.go
-
-static G*
-allocg(void)
-{
- return runtime·newG();
-}
-
-static M* lockextra(bool nilokay);
-static void unlockextra(M*);
-
-// needm is called when a cgo callback happens on a
-// thread without an m (a thread not created by Go).
-// In this case, needm is expected to find an m to use
-// and return with m, g initialized correctly.
-// Since m and g are not set now (likely nil, but see below)
-// needm is limited in what routines it can call. In particular
-// it can only call nosplit functions (textflag 7) and cannot
-// do any scheduling that requires an m.
-//
-// In order to avoid needing heavy lifting here, we adopt
-// the following strategy: there is a stack of available m's
-// that can be stolen. Using compare-and-swap
-// to pop from the stack has ABA races, so we simulate
-// a lock by doing an exchange (via casp) to steal the stack
-// head and replace the top pointer with MLOCKED (1).
-// This serves as a simple spin lock that we can use even
-// without an m. The thread that locks the stack in this way
-// unlocks the stack by storing a valid stack head pointer.
-//
-// In order to make sure that there is always an m structure
-// available to be stolen, we maintain the invariant that there
-// is always one more than needed. At the beginning of the
-// program (if cgo is in use) the list is seeded with a single m.
-// If needm finds that it has taken the last m off the list, its job
-// is - once it has installed its own m so that it can do things like
-// allocate memory - to create a spare m and put it on the list.
-//
-// Each of these extra m's also has a g0 and a curg that are
-// pressed into service as the scheduling stack and current
-// goroutine for the duration of the cgo callback.
-//
-// When the callback is done with the m, it calls dropm to
-// put the m back on the list.
-#pragma textflag NOSPLIT
-void
-runtime·needm(byte x)
-{
- M *mp;
-
- if(runtime·needextram) {
- // Can happen if C/C++ code calls Go from a global ctor.
- // Cannot throw, because the scheduler is not initialized yet.
- runtime·write(2, "fatal error: cgo callback before cgo call\n",
- sizeof("fatal error: cgo callback before cgo call\n")-1);
- runtime·exit(1);
- }
-
- // Lock extra list, take head, unlock popped list.
- // nilokay=false is safe here because of the invariant above,
- // that the extra list always contains or will soon contain
- // at least one m.
- mp = lockextra(false);
-
- // Set needextram when we've just emptied the list,
- // so that the eventual call into cgocallbackg will
- // allocate a new m for the extra list. We delay the
- // allocation until then so that it can be done
- // after exitsyscall makes sure it is okay to be
- // running at all (that is, there's no garbage collection
- // running right now).
- mp->needextram = mp->schedlink == nil;
- unlockextra(mp->schedlink);
-
- // Install g (= m->g0) and set the stack bounds
- // to match the current stack. We don't actually know
- // how big the stack is, just as we don't know how big any
- // scheduling stack is, but we assume there's at least 32 kB,
- // which is more than enough for us.
- runtime·setg(mp->g0);
- g->stack.hi = (uintptr)(&x + 1024);
- g->stack.lo = (uintptr)(&x - 32*1024);
- g->stackguard0 = g->stack.lo + StackGuard;
-
- // Initialize this thread to use the m.
- runtime·asminit();
- runtime·minit();
-}
-
-// newextram allocates an m and puts it on the extra list.
-// It is called with a working local m, so that it can do things
-// like call schedlock and allocate.
-void
-runtime·newextram(void)
-{
- M *mp, *mnext;
- G *gp;
-
- // Create extra goroutine locked to extra m.
- // The goroutine is the context in which the cgo callback will run.
- // The sched.pc will never be returned to, but setting it to
- // runtime.goexit makes clear to the traceback routines where
- // the goroutine stack ends.
- mp = runtime·allocm(nil);
- gp = runtime·malg(4096);
- gp->sched.pc = (uintptr)runtime·goexit + PCQuantum;
- gp->sched.sp = gp->stack.hi;
- gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
- gp->sched.lr = 0;
- gp->sched.g = gp;
- gp->syscallpc = gp->sched.pc;
- gp->syscallsp = gp->sched.sp;
- // malg returns status as Gidle, change to Gsyscall before adding to allg
- // where GC will see it.
- runtime·casgstatus(gp, Gidle, Gsyscall);
- gp->m = mp;
- mp->curg = gp;
- mp->locked = LockInternal;
- mp->lockedg = gp;
- gp->lockedm = mp;
- gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
- if(raceenabled)
- gp->racectx = runtime·racegostart(runtime·newextram);
- // put on allg for garbage collector
- runtime·allgadd(gp);
-
- // Add m to the extra list.
- mnext = lockextra(true);
- mp->schedlink = mnext;
- unlockextra(mp);
-}
-
-// dropm is called when a cgo callback has called needm but is now
-// done with the callback and is returning to the non-Go thread.
-// It puts the current m back onto the extra list.
-//
-// The main expense here is the call to signalstack to release the
-// m's signal stack, and then the call to needm on the next callback
-// from this thread. It is tempting to try to save the m for next time,
-// which would eliminate both these costs, but there might not be
-// a next time: the current thread (which Go does not control) might exit.
-// If we saved the m for that thread, there would be an m leak each time
-// such a thread exited. Instead, we acquire and release an m on each
-// call. These should typically not be scheduling operations, just a few
-// atomics, so the cost should be small.
-//
-// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
-// variable using pthread_key_create. Unlike the pthread keys we already use
-// on OS X, this dummy key would never be read by Go code. It would exist
-// only so that we could register a thread-exit-time destructor.
-// That destructor would put the m back onto the extra list.
-// This is purely a performance optimization. The current version,
-// in which dropm happens on each cgo call, is still correct too.
-// We may have to keep the current version on systems with cgo
-// but without pthreads, like Windows.
-void
-runtime·dropm(void)
-{
- M *mp, *mnext;
-
- // Undo whatever initialization minit did during needm.
- runtime·unminit();
-
- // Clear m and g, and return m to the extra list.
- // After the call to setmg we can only call nosplit functions.
- mp = g->m;
- runtime·setg(nil);
-
- mnext = lockextra(true);
- mp->schedlink = mnext;
- unlockextra(mp);
-}
-
-#define MLOCKED 1
-
-// lockextra locks the extra list and returns the list head.
-// The caller must unlock the list by storing a new list head
-// to runtime.extram. If nilokay is true, then lockextra will
-// return a nil list head if that's what it finds. If nilokay is false,
-// lockextra will keep waiting until the list head is no longer nil.
-#pragma textflag NOSPLIT
-static M*
-lockextra(bool nilokay)
-{
- uintptr mpx;
- void (*yield)(void);
-
- for(;;) {
- mpx = runtime·atomicloaduintptr((uintptr*)&runtime·extram);
- if(mpx == MLOCKED) {
- yield = runtime·osyield;
- yield();
- continue;
- }
- if(mpx == 0 && !nilokay) {
- runtime·usleep(1);
- continue;
- }
- if(!runtime·casuintptr((uintptr*)&runtime·extram, mpx, MLOCKED)) {
- yield = runtime·osyield;
- yield();
- continue;
- }
- break;
- }
- return (M*)mpx;
-}
-
-#pragma textflag NOSPLIT
-static void
-unlockextra(M *mp)
-{
- runtime·atomicstorep(&runtime·extram, mp);
-}
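For reference, the list-head-as-lock idea described in the needm comment and implemented by lockextra/unlockextra above can be written as a small standalone C11 sketch (illustrative only, not runtime code; the names and the sched_yield call are assumptions). The CAS swaps the sentinel in for the whole list head, so the winner takes the entire list and the ABA problem of popping a single node never arises; unlocking is just publishing a valid head again.

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct Node { struct Node *next; } Node;

    #define LOCKED ((uintptr_t)1)        /* sentinel: list head is currently held */

    static _Atomic uintptr_t listhead;   /* 0 = empty, LOCKED = held, otherwise a Node* */

    /* Steal the whole list, spinning while another thread holds it.
       (The runtime's lockextra can additionally wait for a non-nil head.) */
    Node* lockhead(void) {
        for (;;) {
            uintptr_t h = atomic_load(&listhead);
            if (h == LOCKED) {           /* someone else holds it; yield and retry */
                sched_yield();
                continue;
            }
            if (atomic_compare_exchange_weak(&listhead, &h, LOCKED))
                return (Node*)h;         /* caller now owns the popped list */
        }
    }

    /* Unlock by publishing a valid head pointer (possibly NULL). */
    void unlockhead(Node *head) {
        atomic_store(&listhead, (uintptr_t)head);
    }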
-
-
-// Create a new m. It will start off with a call to fn, or else the scheduler.
-static void
-newm(void(*fn)(void), P *p)
-{
- M *mp;
-
- mp = runtime·allocm(p);
- mp->nextp = p;
- mp->mstartfn = fn;
-
- if(runtime·iscgo) {
- CgoThreadStart ts;
-
- if(_cgo_thread_start == nil)
- runtime·throw("_cgo_thread_start missing");
- ts.g = mp->g0;
- ts.tls = mp->tls;
- ts.fn = runtime·mstart;
- runtime·asmcgocall(_cgo_thread_start, &ts);
- return;
- }
- runtime·newosproc(mp, (byte*)mp->g0->stack.hi);
-}
-
-// Stops execution of the current m until new work is available.
-// Returns with acquired P.
-static void
-stopm(void)
-{
- if(g->m->locks)
- runtime·throw("stopm holding locks");
- if(g->m->p)
- runtime·throw("stopm holding p");
- if(g->m->spinning) {
- g->m->spinning = false;
- runtime·xadd(&runtime·sched.nmspinning, -1);
- }
-
-retry:
- runtime·lock(&runtime·sched.lock);
- mput(g->m);
- runtime·unlock(&runtime·sched.lock);
- runtime·notesleep(&g->m->park);
- runtime·noteclear(&g->m->park);
- if(g->m->helpgc) {
- runtime·gchelper();
- g->m->helpgc = 0;
- g->m->mcache = nil;
- goto retry;
- }
- acquirep(g->m->nextp);
- g->m->nextp = nil;
-}
-
-static void
-mspinning(void)
-{
- g->m->spinning = true;
-}
-
-// Schedules some M to run the p (creates an M if necessary).
-// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
-static void
-startm(P *p, bool spinning)
-{
- M *mp;
- void (*fn)(void);
-
- runtime·lock(&runtime·sched.lock);
- if(p == nil) {
- p = pidleget();
- if(p == nil) {
- runtime·unlock(&runtime·sched.lock);
- if(spinning)
- runtime·xadd(&runtime·sched.nmspinning, -1);
- return;
- }
- }
- mp = mget();
- runtime·unlock(&runtime·sched.lock);
- if(mp == nil) {
- fn = nil;
- if(spinning)
- fn = mspinning;
- newm(fn, p);
- return;
- }
- if(mp->spinning)
- runtime·throw("startm: m is spinning");
- if(mp->nextp)
- runtime·throw("startm: m has p");
- mp->spinning = spinning;
- mp->nextp = p;
- runtime·notewakeup(&mp->park);
-}
-
-// Hands off P from syscall or locked M.
-static void
-handoffp(P *p)
-{
- // if it has local work, start it straight away
- if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
- startm(p, false);
- return;
- }
- // no local work, check that there are no spinning/idle M's,
- // otherwise our help is not required
- if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 && // TODO: fast atomic
- runtime·cas(&runtime·sched.nmspinning, 0, 1)){
- startm(p, true);
- return;
- }
- runtime·lock(&runtime·sched.lock);
- if(runtime·sched.gcwaiting) {
- p->status = Pgcstop;
- if(--runtime·sched.stopwait == 0)
- runtime·notewakeup(&runtime·sched.stopnote);
- runtime·unlock(&runtime·sched.lock);
- return;
- }
- if(runtime·sched.runqsize) {
- runtime·unlock(&runtime·sched.lock);
- startm(p, false);
- return;
- }
- // If this is the last running P and nobody is polling the network,
- // we need to wake up another M to poll the network.
- if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
- runtime·unlock(&runtime·sched.lock);
- startm(p, false);
- return;
- }
- pidleput(p);
- runtime·unlock(&runtime·sched.lock);
-}
-
-// Tries to add one more P to execute G's.
-// Called when a G is made runnable (newproc, ready).
-static void
-wakep(void)
-{
- // be conservative about spinning threads
- if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
- return;
- startm(nil, true);
-}
-
-// Stops execution of the current m that is locked to a g until the g is runnable again.
-// Returns with acquired P.
-static void
-stoplockedm(void)
-{
- P *p;
- uint32 status;
-
- if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
- runtime·throw("stoplockedm: inconsistent locking");
- if(g->m->p) {
- // Schedule another M to run this p.
- p = releasep();
- handoffp(p);
- }
- incidlelocked(1);
- // Wait until another thread schedules lockedg again.
- runtime·notesleep(&g->m->park);
- runtime·noteclear(&g->m->park);
- status = runtime·readgstatus(g->m->lockedg);
- if((status&~Gscan) != Grunnable){
- runtime·printf("runtime:stoplockedm: g is not Grunnable or Gscanrunnable");
- dumpgstatus(g);
- runtime·throw("stoplockedm: not runnable");
- }
- acquirep(g->m->nextp);
- g->m->nextp = nil;
-}
-
-// Schedules the locked m to run the locked gp.
-static void
-startlockedm(G *gp)
-{
- M *mp;
- P *p;
-
- mp = gp->lockedm;
- if(mp == g->m)
- runtime·throw("startlockedm: locked to me");
- if(mp->nextp)
- runtime·throw("startlockedm: m has p");
- // directly hand off the current P to the locked m
- incidlelocked(-1);
- p = releasep();
- mp->nextp = p;
- runtime·notewakeup(&mp->park);
- stopm();
-}
-
-// Stops the current m for stoptheworld.
-// Returns when the world is restarted.
-static void
-gcstopm(void)
-{
- P *p;
-
- if(!runtime·sched.gcwaiting)
- runtime·throw("gcstopm: not waiting for gc");
- if(g->m->spinning) {
- g->m->spinning = false;
- runtime·xadd(&runtime·sched.nmspinning, -1);
- }
- p = releasep();
- runtime·lock(&runtime·sched.lock);
- p->status = Pgcstop;
- if(--runtime·sched.stopwait == 0)
- runtime·notewakeup(&runtime·sched.stopnote);
- runtime·unlock(&runtime·sched.lock);
- stopm();
-}
-
-// Schedules gp to run on the current M.
-// Never returns.
-static void
-execute(G *gp)
-{
- int32 hz;
-
- runtime·casgstatus(gp, Grunnable, Grunning);
- gp->waitsince = 0;
- gp->preempt = false;
- gp->stackguard0 = gp->stack.lo + StackGuard;
- g->m->p->schedtick++;
- g->m->curg = gp;
- gp->m = g->m;
-
- // Check whether the profiler needs to be turned on or off.
- hz = runtime·sched.profilehz;
- if(g->m->profilehz != hz)
- runtime·resetcpuprofiler(hz);
-
- runtime·gogo(&gp->sched);
-}
-
-// Finds a runnable goroutine to execute.
-// Tries to steal from other P's, get g from global queue, poll network.
-static G*
-findrunnable(void)
-{
- G *gp;
- P *p;
- int32 i;
-
-top:
- if(runtime·sched.gcwaiting) {
- gcstopm();
- goto top;
- }
- if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
- runtime·ready(gp);
- // local runq
- gp = runqget(g->m->p);
- if(gp)
- return gp;
- // global runq
- if(runtime·sched.runqsize) {
- runtime·lock(&runtime·sched.lock);
- gp = globrunqget(g->m->p, 0);
- runtime·unlock(&runtime·sched.lock);
- if(gp)
- return gp;
- }
- // poll network
- gp = runtime·netpoll(false); // non-blocking
- if(gp) {
- injectglist(gp->schedlink);
- runtime·casgstatus(gp, Gwaiting, Grunnable);
- return gp;
- }
- // If number of spinning M's >= number of busy P's, block.
- // This is necessary to prevent excessive CPU consumption
- // when GOMAXPROCS>>1 but the program parallelism is low.
- if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle)) // TODO: fast atomic
- goto stop;
- if(!g->m->spinning) {
- g->m->spinning = true;
- runtime·xadd(&runtime·sched.nmspinning, 1);
- }
- // random steal from other P's
- for(i = 0; i < 2*runtime·gomaxprocs; i++) {
- if(runtime·sched.gcwaiting)
- goto top;
- p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
- if(p == g->m->p)
- gp = runqget(p);
- else
- gp = runqsteal(g->m->p, p);
- if(gp)
- return gp;
- }
-stop:
- // return P and block
- runtime·lock(&runtime·sched.lock);
- if(runtime·sched.gcwaiting) {
- runtime·unlock(&runtime·sched.lock);
- goto top;
- }
- if(runtime·sched.runqsize) {
- gp = globrunqget(g->m->p, 0);
- runtime·unlock(&runtime·sched.lock);
- return gp;
- }
- p = releasep();
- pidleput(p);
- runtime·unlock(&runtime·sched.lock);
- if(g->m->spinning) {
- g->m->spinning = false;
- runtime·xadd(&runtime·sched.nmspinning, -1);
- }
- // check all runqueues once again
- for(i = 0; i < runtime·gomaxprocs; i++) {
- p = runtime·allp[i];
- if(p && p->runqhead != p->runqtail) {
- runtime·lock(&runtime·sched.lock);
- p = pidleget();
- runtime·unlock(&runtime·sched.lock);
- if(p) {
- acquirep(p);
- goto top;
- }
- break;
- }
- }
- // poll network
- if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
- if(g->m->p)
- runtime·throw("findrunnable: netpoll with p");
- if(g->m->spinning)
- runtime·throw("findrunnable: netpoll with spinning");
- gp = runtime·netpoll(true); // block until new work is available
- runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
- if(gp) {
- runtime·lock(&runtime·sched.lock);
- p = pidleget();
- runtime·unlock(&runtime·sched.lock);
- if(p) {
- acquirep(p);
- injectglist(gp->schedlink);
- runtime·casgstatus(gp, Gwaiting, Grunnable);
- return gp;
- }
- injectglist(gp);
- }
- }
- stopm();
- goto top;
-}
-
-static void
-resetspinning(void)
-{
- int32 nmspinning;
-
- if(g->m->spinning) {
- g->m->spinning = false;
- nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
- if(nmspinning < 0)
- runtime·throw("findrunnable: negative nmspinning");
- } else
- nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
-
- // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
- // so see if we need to wake up another P here.
- if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
- wakep();
-}
-
-// Injects the list of runnable G's into the scheduler.
-// Can run concurrently with GC.
-static void
-injectglist(G *glist)
-{
- int32 n;
- G *gp;
-
- if(glist == nil)
- return;
- runtime·lock(&runtime·sched.lock);
- for(n = 0; glist; n++) {
- gp = glist;
- glist = gp->schedlink;
- runtime·casgstatus(gp, Gwaiting, Grunnable);
- globrunqput(gp);
- }
- runtime·unlock(&runtime·sched.lock);
-
- for(; n && runtime·sched.npidle; n--)
- startm(nil, false);
-}
-
-// One round of scheduler: find a runnable goroutine and execute it.
-// Never returns.
-static void
-schedule(void)
-{
- G *gp;
- uint32 tick;
-
- if(g->m->locks)
- runtime·throw("schedule: holding locks");
-
- if(g->m->lockedg) {
- stoplockedm();
- execute(g->m->lockedg); // Never returns.
- }
-
-top:
- if(runtime·sched.gcwaiting) {
- gcstopm();
- goto top;
- }
-
- gp = nil;
- // Check the global runnable queue once in a while to ensure fairness.
- // Otherwise two goroutines can completely occupy the local runqueue
- // by constantly respawning each other.
- tick = g->m->p->schedtick;
- // This is a fancy way to say tick%61 == 0;
- // it uses two MUL instructions instead of a single DIV and so is faster on modern processors.
- if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
- runtime·lock(&runtime·sched.lock);
- gp = globrunqget(g->m->p, 1);
- runtime·unlock(&runtime·sched.lock);
- if(gp)
- resetspinning();
- }
- if(gp == nil) {
- gp = runqget(g->m->p);
- if(gp && g->m->spinning)
- runtime·throw("schedule: spinning with local work");
- }
- if(gp == nil) {
- gp = findrunnable(); // blocks until work is available
- resetspinning();
- }
-
- if(gp->lockedm) {
- // Hands off own p to the locked m,
- // then blocks waiting for a new p.
- startlockedm(gp);
- goto top;
- }
-
- execute(gp);
-}
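The magic number in the schedtick check inside schedule above is 0x4325c53f = ceil(2^36/61), so for any 32-bit tick the multiply-and-shift yields tick/61 exactly, and subtracting (tick/61)*61 leaves tick%61. A standalone C check of that identity (illustrative only, not part of this code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* 0x4325c53f == ceil(2^36 / 61); the multiply-high replaces the divide. */
        uint32_t tick = 0;
        do {
            uint32_t q = (uint32_t)(((uint64_t)tick * 0x4325c53fu) >> 36);
            uint32_t r = tick - q*61;
            if (r != tick % 61) {
                printf("mismatch at %u\n", tick);
                return 1;
            }
        } while (++tick != 0);   /* exhaustive over all 32-bit ticks; takes a few seconds */
        printf("tick%%61 identity holds for all uint32 values\n");
        return 0;
    }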
-
-// dropg removes the association between m and the current goroutine m->curg (gp for short).
-// Typically a caller sets gp's status away from Grunning and then
-// immediately calls dropg to finish the job. The caller is also responsible
-// for arranging that gp will be restarted using runtime·ready at an
-// appropriate time. After calling dropg and arranging for gp to be
-// readied later, the caller can do other work but eventually should
-// call schedule to restart the scheduling of goroutines on this m.
-static void
-dropg(void)
-{
- if(g->m->lockedg == nil) {
- g->m->curg->m = nil;
- g->m->curg = nil;
- }
-}
-
-// Puts the current goroutine into a waiting state and calls unlockf.
-// If unlockf returns false, the goroutine is resumed.
-void
-runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
-{
- void (*fn)(G*);
-
- g->m->waitlock = lock;
- g->m->waitunlockf = unlockf;
- g->waitreason = reason;
- fn = runtime·park_m;
- runtime·mcall(&fn);
-}
-
-bool
-runtime·parkunlock_c(G *gp, void *lock)
-{
- USED(gp);
- runtime·unlock(lock);
- return true;
-}
-
-// Puts the current goroutine into a waiting state and unlocks the lock.
-// The goroutine can be made runnable again by calling runtime·ready(gp).
-void
-runtime·parkunlock(Mutex *lock, String reason)
-{
- runtime·park(runtime·parkunlock_c, lock, reason);
-}
-
-// runtime·park continuation on g0.
-void
-runtime·park_m(G *gp)
-{
- bool ok;
-
- runtime·casgstatus(gp, Grunning, Gwaiting);
- dropg();
-
- if(g->m->waitunlockf) {
- ok = g->m->waitunlockf(gp, g->m->waitlock);
- g->m->waitunlockf = nil;
- g->m->waitlock = nil;
- if(!ok) {
- runtime·casgstatus(gp, Gwaiting, Grunnable);
- execute(gp); // Schedule it back, never returns.
- }
- }
-
- schedule();
-}
-
-// Gosched continuation on g0.
-void
-runtime·gosched_m(G *gp)
-{
- uint32 status;
-
- status = runtime·readgstatus(gp);
- if((status&~Gscan) != Grunning){
- dumpgstatus(gp);
- runtime·throw("bad g status");
- }
- runtime·casgstatus(gp, Grunning, Grunnable);
- dropg();
- runtime·lock(&runtime·sched.lock);
- globrunqput(gp);
- runtime·unlock(&runtime·sched.lock);
-
- schedule();
-}
-
-// Finishes execution of the current goroutine.
-// Must be NOSPLIT because it is called from Go.
-#pragma textflag NOSPLIT
-void
-runtime·goexit1(void)
-{
- void (*fn)(G*);
-
- if(raceenabled)
- runtime·racegoend();
- fn = goexit0;
- runtime·mcall(&fn);
-}
-
-// runtime·goexit continuation on g0.
-static void
-goexit0(G *gp)
-{
- runtime·casgstatus(gp, Grunning, Gdead);
- gp->m = nil;
- gp->lockedm = nil;
- g->m->lockedg = nil;
- gp->paniconfault = 0;
- gp->defer = nil; // should be nil already, but just in case.
- gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
- gp->writebuf.array = nil;
- gp->writebuf.len = 0;
- gp->writebuf.cap = 0;
- gp->waitreason.str = nil;
- gp->waitreason.len = 0;
- gp->param = nil;
-
- dropg();
-
- if(g->m->locked & ~LockExternal) {
- runtime·printf("invalid m->locked = %d\n", g->m->locked);
- runtime·throw("internal lockOSThread error");
- }
- g->m->locked = 0;
- gfput(g->m->p, gp);
- schedule();
-}
-
-#pragma textflag NOSPLIT
-static void
-save(uintptr pc, uintptr sp)
-{
- g->sched.pc = pc;
- g->sched.sp = sp;
- g->sched.lr = 0;
- g->sched.ret = 0;
- g->sched.ctxt = 0;
- g->sched.g = g;
-}
-
-static void entersyscall_bad(void);
-static void entersyscall_sysmon(void);
-static void entersyscall_gcwait(void);
-
-// The goroutine g is about to enter a system call.
-// Record that it's not using the cpu anymore.
-// This is called only from the go syscall library and cgocall,
-// not from the low-level system calls used by the runtime.
-//
-// Entersyscall cannot split the stack: the runtime·gosave must
-// make g->sched refer to the caller's stack segment, because
-// entersyscall is going to return immediately after.
-//
-// Nothing entersyscall calls can split the stack either.
-// We cannot safely move the stack during an active call to syscall,
-// because we do not know which of the uintptr arguments are
-// really pointers (back into the stack).
-// In practice, this means that we make the fast path run through
-// entersyscall doing no-split things, and the slow path has to use onM
-// to run bigger things on the m stack.
-//
-// reentersyscall is the entry point used by cgo callbacks, where explicitly
-// saved SP and PC are restored. This is needed when exitsyscall will be called
-// from a function further up in the call stack than the parent, as g->syscallsp
-// must always point to a valid stack frame. entersyscall below is the normal
-// entry point for syscalls, which obtains the SP and PC from the caller.
-#pragma textflag NOSPLIT
-void
-runtime·reentersyscall(uintptr pc, uintptr sp)
-{
- void (*fn)(void);
-
- // Disable preemption because during this function g is in Gsyscall status,
- // but can have an inconsistent g->sched; do not let the GC observe it.
- g->m->locks++;
-
- // Entersyscall must not call any function that might split/grow the stack.
- // (See details in comment above.)
- // Catch calls that might, by replacing the stack guard with something that
- // will trip any stack check and leaving a flag to tell newstack to die.
- g->stackguard0 = StackPreempt;
- g->throwsplit = 1;
-
- // Leave SP around for GC and traceback.
- save(pc, sp);
- g->syscallsp = sp;
- g->syscallpc = pc;
- runtime·casgstatus(g, Grunning, Gsyscall);
- if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
- fn = entersyscall_bad;
- runtime·onM(&fn);
- }
-
- if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomic
- fn = entersyscall_sysmon;
- runtime·onM(&fn);
- save(pc, sp);
- }
-
- g->m->mcache = nil;
- g->m->p->m = nil;
- runtime·atomicstore(&g->m->p->status, Psyscall);
- if(runtime·sched.gcwaiting) {
- fn = entersyscall_gcwait;
- runtime·onM(&fn);
- save(pc, sp);
- }
-
- // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
- // We set stackguard to StackPreempt so that the first split stack check calls morestack.
- // Morestack detects this case and throws.
- g->stackguard0 = StackPreempt;
- g->m->locks--;
-}
-
-// Standard syscall entry used by the go syscall library and normal cgo calls.
-#pragma textflag NOSPLIT
-void
-·entersyscall(int32 dummy)
-{
- runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
-}
-
-static void
-entersyscall_bad(void)
-{
- G *gp;
-
- gp = g->m->curg;
- runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
- gp->syscallsp, gp->stack.lo, gp->stack.hi);
- runtime·throw("entersyscall");
-}
-
-static void
-entersyscall_sysmon(void)
-{
- runtime·lock(&runtime·sched.lock);
- if(runtime·atomicload(&runtime·sched.sysmonwait)) {
- runtime·atomicstore(&runtime·sched.sysmonwait, 0);
- runtime·notewakeup(&runtime·sched.sysmonnote);
- }
- runtime·unlock(&runtime·sched.lock);
-}
-
-static void
-entersyscall_gcwait(void)
-{
- runtime·lock(&runtime·sched.lock);
- if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
- if(--runtime·sched.stopwait == 0)
- runtime·notewakeup(&runtime·sched.stopnote);
- }
- runtime·unlock(&runtime·sched.lock);
-}
-
-static void entersyscallblock_handoff(void);
-
-// The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
-#pragma textflag NOSPLIT
-void
-·entersyscallblock(int32 dummy)
-{
- void (*fn)(void);
-
- g->m->locks++; // see comment in entersyscall
- g->throwsplit = 1;
- g->stackguard0 = StackPreempt; // see comment in entersyscall
-
- // Leave SP around for GC and traceback.
- save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
- g->syscallsp = g->sched.sp;
- g->syscallpc = g->sched.pc;
- runtime·casgstatus(g, Grunning, Gsyscall);
- if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
- fn = entersyscall_bad;
- runtime·onM(&fn);
- }
-
- fn = entersyscallblock_handoff;
- runtime·onM(&fn);
-
- // Resave for traceback during blocked call.
- save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
-
- g->m->locks--;
-}
-
-static void
-entersyscallblock_handoff(void)
-{
- handoffp(releasep());
-}
-
-// The goroutine g exited its system call.
-// Arrange for it to run on a cpu again.
-// This is called only from the go syscall library, not
-// from the low-level system calls used by the runtime.
-#pragma textflag NOSPLIT
-void
-·exitsyscall(int32 dummy)
-{
- void (*fn)(G*);
-
- g->m->locks++; // see comment in entersyscall
-
- if(runtime·getcallersp(&dummy) > g->syscallsp)
- runtime·throw("exitsyscall: syscall frame is no longer valid");
-
- g->waitsince = 0;
- if(exitsyscallfast()) {
- // There's a cpu for us, so we can run.
- g->m->p->syscalltick++;
- // We need to cas the status and scan before resuming...
- runtime·casgstatus(g, Gsyscall, Grunning);
-
- // Garbage collector isn't running (since we are),
- // so okay to clear syscallsp.
- g->syscallsp = (uintptr)nil;
- g->m->locks--;
- if(g->preempt) {
- // restore the preemption request in case we've cleared it in newstack
- g->stackguard0 = StackPreempt;
- } else {
- // otherwise restore the real stackguard; we spoiled it in entersyscall/entersyscallblock
- g->stackguard0 = g->stack.lo + StackGuard;
- }
- g->throwsplit = 0;
- return;
- }
-
- g->m->locks--;
-
- // Call the scheduler.
- fn = exitsyscall0;
- runtime·mcall(&fn);
-
- // Scheduler returned, so we're allowed to run now.
- // Delete the syscallsp information that we left for
- // the garbage collector during the system call.
- // Must wait until now because until gosched returns
- // we don't know for sure that the garbage collector
- // is not running.
- g->syscallsp = (uintptr)nil;
- g->m->p->syscalltick++;
- g->throwsplit = 0;
-}
-
-static void exitsyscallfast_pidle(void);
-
-#pragma textflag NOSPLIT
-static bool
-exitsyscallfast(void)
-{
- void (*fn)(void);
-
- // Freezetheworld sets stopwait but does not retake P's.
- if(runtime·sched.stopwait) {
- g->m->mcache = nil;
- g->m->p = nil;
- return false;
- }
-
- // Try to re-acquire the last P.
- if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
- // There's a cpu for us, so we can run.
- g->m->mcache = g->m->p->mcache;
- g->m->p->m = g->m;
- return true;
- }
- // Try to get any other idle P.
- g->m->mcache = nil;
- g->m->p = nil;
- if(runtime·sched.pidle) {
- fn = exitsyscallfast_pidle;
- runtime·onM(&fn);
- if(g->m->scalararg[0]) {
- g->m->scalararg[0] = 0;
- return true;
- }
- }
- return false;
-}
-
-static void
-exitsyscallfast_pidle(void)
-{
- P *p;
-
- runtime·lock(&runtime·sched.lock);
- p = pidleget();
- if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
- runtime·atomicstore(&runtime·sched.sysmonwait, 0);
- runtime·notewakeup(&runtime·sched.sysmonnote);
- }
- runtime·unlock(&runtime·sched.lock);
- if(p) {
- acquirep(p);
- g->m->scalararg[0] = 1;
- } else
- g->m->scalararg[0] = 0;
-}
-
-// runtime·exitsyscall slow path on g0.
-// Failed to acquire P, enqueue gp as runnable.
-static void
-exitsyscall0(G *gp)
-{
- P *p;
-
- runtime·casgstatus(gp, Gsyscall, Grunnable);
- dropg();
- runtime·lock(&runtime·sched.lock);
- p = pidleget();
- if(p == nil)
- globrunqput(gp);
- else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
- runtime·atomicstore(&runtime·sched.sysmonwait, 0);
- runtime·notewakeup(&runtime·sched.sysmonnote);
- }
- runtime·unlock(&runtime·sched.lock);
- if(p) {
- acquirep(p);
- execute(gp); // Never returns.
- }
- if(g->m->lockedg) {
- // Wait until another thread schedules gp and so m again.
- stoplockedm();
- execute(gp); // Never returns.
- }
- stopm();
- schedule(); // Never returns.
-}
-
-static void
-beforefork(void)
-{
- G *gp;
-
- gp = g->m->curg;
- // Fork can hang if preempted with signals frequently enough (see issue 5517).
- // Ensure that we stay on the same M where we disable profiling.
- gp->m->locks++;
- if(gp->m->profilehz != 0)
- runtime·resetcpuprofiler(0);
-
- // This function is called before fork in the syscall package.
- // Code between fork and exec must not allocate memory or even try to grow the stack.
- // Here we spoil g->stackguard to reliably detect any attempt to grow the stack.
- // runtime_AfterFork will undo this in the parent process, but not in the child.
- gp->stackguard0 = StackFork;
-}
-
-// Called from syscall package before fork.
-#pragma textflag NOSPLIT
-void
-syscall·runtime_BeforeFork(void)
-{
- void (*fn)(void);
-
- fn = beforefork;
- runtime·onM(&fn);
-}
-
-static void
-afterfork(void)
-{
- int32 hz;
- G *gp;
-
- gp = g->m->curg;
- // See the comment in runtime_BeforeFork.
- gp->stackguard0 = gp->stack.lo + StackGuard;
-
- hz = runtime·sched.profilehz;
- if(hz != 0)
- runtime·resetcpuprofiler(hz);
- gp->m->locks--;
-}
-
-// Called from syscall package after fork in parent.
-#pragma textflag NOSPLIT
-void
-syscall·runtime_AfterFork(void)
-{
- void (*fn)(void);
-
- fn = afterfork;
- runtime·onM(&fn);
-}
-
-// Hook used by runtime·malg to call runtime·stackalloc on the
-// scheduler stack. This exists because runtime·stackalloc insists
-// on being called on the scheduler stack, to avoid trying to grow
-// the stack while allocating a new stack segment.
-static void
-mstackalloc(G *gp)
-{
- G *newg;
- uintptr size;
-
- newg = g->m->ptrarg[0];
- size = g->m->scalararg[0];
-
- newg->stack = runtime·stackalloc(size);
-
- runtime·gogo(&gp->sched);
-}
-
-// Allocate a new g, with a stack big enough for stacksize bytes.
-G*
-runtime·malg(int32 stacksize)
-{
- G *newg;
- void (*fn)(G*);
-
- newg = allocg();
- if(stacksize >= 0) {
- stacksize = runtime·round2(StackSystem + stacksize);
- if(g == g->m->g0) {
- // running on scheduler stack already.
- newg->stack = runtime·stackalloc(stacksize);
- } else {
- // have to call stackalloc on scheduler stack.
- g->m->scalararg[0] = stacksize;
- g->m->ptrarg[0] = newg;
- fn = mstackalloc;
- runtime·mcall(&fn);
- g->m->ptrarg[0] = nil;
- }
- newg->stackguard0 = newg->stack.lo + StackGuard;
- newg->stackguard1 = ~(uintptr)0;
- }
- return newg;
-}
-
-static void
-newproc_m(void)
-{
- byte *argp;
- void *callerpc;
- FuncVal *fn;
- int32 siz;
-
- siz = g->m->scalararg[0];
- callerpc = (void*)g->m->scalararg[1];
- argp = g->m->ptrarg[0];
- fn = (FuncVal*)g->m->ptrarg[1];
-
- runtime·newproc1(fn, argp, siz, 0, callerpc);
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
-}
-
-// Create a new g running fn with siz bytes of arguments.
-// Put it on the queue of g's waiting to run.
-// The compiler turns a go statement into a call to this.
-// Cannot split the stack because it assumes that the arguments
-// are available sequentially after &fn; they would not be
-// copied if a stack split occurred.
-#pragma textflag NOSPLIT
-void
-runtime·newproc(int32 siz, FuncVal* fn, ...)
-{
- byte *argp;
- void (*mfn)(void);
-
- if(thechar == '5' || thechar == '9')
- argp = (byte*)(&fn+2); // skip caller's saved LR
- else
- argp = (byte*)(&fn+1);
-
- g->m->locks++;
- g->m->scalararg[0] = siz;
- g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
- g->m->ptrarg[0] = argp;
- g->m->ptrarg[1] = fn;
- mfn = newproc_m;
- runtime·onM(&mfn);
- g->m->locks--;
-}
-
-void runtime·main(void);
-
-// Create a new g running fn with narg bytes of arguments starting
-// at argp and returning nret bytes of results. callerpc is the
-// address of the go statement that created this. The new g is put
-// on the queue of g's waiting to run.
-G*
-runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
-{
- byte *sp;
- G *newg;
- P *p;
- int32 siz;
-
- if(fn == nil) {
- g->m->throwing = -1; // do not dump full stacks
- runtime·throw("go of nil func value");
- }
- g->m->locks++; // disable preemption because it can be holding p in a local var
- siz = narg + nret;
- siz = (siz+7) & ~7;
-
- // We could allocate a larger initial stack if necessary.
- // Not worth it: this is almost always an error.
- // 4*sizeof(uintreg): extra space added below
- // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
- if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
- runtime·throw("runtime.newproc: function arguments too large for new goroutine");
-
- p = g->m->p;
- if((newg = gfget(p)) == nil) {
- newg = runtime·malg(StackMin);
- runtime·casgstatus(newg, Gidle, Gdead);
- runtime·allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
- }
- if(newg->stack.hi == 0)
- runtime·throw("newproc1: newg missing stack");
-
- if(runtime·readgstatus(newg) != Gdead)
- runtime·throw("newproc1: new g is not Gdead");
-
- sp = (byte*)newg->stack.hi;
- sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
- sp -= siz;
- runtime·memmove(sp, argp, narg);
- if(thechar == '5' || thechar == '9') {
- // caller's LR
- sp -= sizeof(void*);
- *(void**)sp = nil;
- }
-
- runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
- newg->sched.sp = (uintptr)sp;
- newg->sched.pc = (uintptr)runtime·goexit + PCQuantum; // +PCQuantum so that previous instruction is in same function
- newg->sched.g = newg;
- runtime·gostartcallfn(&newg->sched, fn);
- newg->gopc = (uintptr)callerpc;
- runtime·casgstatus(newg, Gdead, Grunnable);
-
- if(p->goidcache == p->goidcacheend) {
- // Sched.goidgen is the last allocated id;
- // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
- // At startup sched.goidgen=0, so the main goroutine receives goid=1.
- p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
- p->goidcache -= GoidCacheBatch - 1;
- p->goidcacheend = p->goidcache + GoidCacheBatch;
- }
- newg->goid = p->goidcache++;
- if(raceenabled)
- newg->racectx = runtime·racegostart((void*)callerpc);
- runqput(p, newg);
-
- if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
- wakep();
- g->m->locks--;
- if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
- g->stackguard0 = StackPreempt;
- return newg;
-}
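The goid cache in newproc1 above amortizes contention on the shared counter: one atomic add claims GoidCacheBatch ids, which are then handed out locally with no synchronization. A standalone C sketch of the same batching pattern (types, names, and the batch size are illustrative assumptions):

    #include <stdatomic.h>
    #include <stdint.h>

    enum { BATCH = 16 };                 /* plays the role of GoidCacheBatch */

    static _Atomic uint64_t idgen;       /* last id handed out globally; starts at 0 */

    typedef struct {
        uint64_t cache;                  /* next id to hand out locally */
        uint64_t cacheend;               /* one past the last id in the current batch */
    } IDCache;

    uint64_t nextid(IDCache *c) {
        if (c->cache == c->cacheend) {
            /* One atomic op claims ids last-BATCH+1 through last. */
            uint64_t last = atomic_fetch_add(&idgen, BATCH) + BATCH;
            c->cache = last - BATCH + 1;
            c->cacheend = last + 1;
        }
        return c->cache++;               /* first caller overall gets id 1 */
    }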
-
-// Put on gfree list.
-// If local list is too long, transfer a batch to the global list.
-static void
-gfput(P *p, G *gp)
-{
- uintptr stksize;
-
- if(runtime·readgstatus(gp) != Gdead)
- runtime·throw("gfput: bad status (not Gdead)");
-
- stksize = gp->stack.hi - gp->stack.lo;
-
- if(stksize != FixedStack) {
- // non-standard stack size - free it.
- runtime·stackfree(gp->stack);
- gp->stack.lo = 0;
- gp->stack.hi = 0;
- gp->stackguard0 = 0;
- }
- gp->schedlink = p->gfree;
- p->gfree = gp;
- p->gfreecnt++;
- if(p->gfreecnt >= 64) {
- runtime·lock(&runtime·sched.gflock);
- while(p->gfreecnt >= 32) {
- p->gfreecnt--;
- gp = p->gfree;
- p->gfree = gp->schedlink;
- gp->schedlink = runtime·sched.gfree;
- runtime·sched.gfree = gp;
- runtime·sched.ngfree++;
- }
- runtime·unlock(&runtime·sched.gflock);
- }
-}
-
-// Get from gfree list.
-// If local list is empty, grab a batch from global list.
-static G*
-gfget(P *p)
-{
- G *gp;
- void (*fn)(G*);
-
-retry:
- gp = p->gfree;
- if(gp == nil && runtime·sched.gfree) {
- runtime·lock(&runtime·sched.gflock);
- while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
- p->gfreecnt++;
- gp = runtime·sched.gfree;
- runtime·sched.gfree = gp->schedlink;
- runtime·sched.ngfree--;
- gp->schedlink = p->gfree;
- p->gfree = gp;
- }
- runtime·unlock(&runtime·sched.gflock);
- goto retry;
- }
- if(gp) {
- p->gfree = gp->schedlink;
- p->gfreecnt--;
-
- if(gp->stack.lo == 0) {
- // Stack was deallocated in gfput. Allocate a new one.
- if(g == g->m->g0) {
- gp->stack = runtime·stackalloc(FixedStack);
- } else {
- g->m->scalararg[0] = FixedStack;
- g->m->ptrarg[0] = gp;
- fn = mstackalloc;
- runtime·mcall(&fn);
- g->m->ptrarg[0] = nil;
- }
- gp->stackguard0 = gp->stack.lo + StackGuard;
- } else {
- if(raceenabled)
- runtime·racemalloc((void*)gp->stack.lo, gp->stack.hi - gp->stack.lo);
- }
- }
- return gp;
-}
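gfput and gfget above form a per-P cache in front of a global free list with hysteresis: once the local list reaches 64 entries, donate down to 32; when it is empty, refill up to 32 from the global list. A standalone C sketch of that pattern (illustrative only; the unlocked read of the global head is just a hint, as in gfget):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct Item { struct Item *next; } Item;

    static pthread_mutex_t gflock = PTHREAD_MUTEX_INITIALIZER;
    static Item *globalfree;

    typedef struct {
        Item *free;       /* per-thread cache */
        int   nfree;
    } Cache;

    void cacheput(Cache *c, Item *it) {
        it->next = c->free;
        c->free = it;
        if (++c->nfree >= 64) {                  /* local list too long */
            pthread_mutex_lock(&gflock);
            while (c->nfree >= 32) {             /* keep half, donate the rest */
                Item *g = c->free;
                c->free = g->next;
                c->nfree--;
                g->next = globalfree;
                globalfree = g;
            }
            pthread_mutex_unlock(&gflock);
        }
    }

    Item* cacheget(Cache *c) {
        if (c->free == NULL && globalfree != NULL) {   /* racy hint, checked under lock */
            pthread_mutex_lock(&gflock);
            while (c->nfree < 32 && globalfree != NULL) {   /* refill a batch */
                Item *g = globalfree;
                globalfree = g->next;
                g->next = c->free;
                c->free = g;
                c->nfree++;
            }
            pthread_mutex_unlock(&gflock);
        }
        if (c->free == NULL)
            return NULL;                         /* caller allocates a fresh one */
        Item *it = c->free;
        c->free = it->next;
        c->nfree--;
        return it;
    }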
-
-// Purge all cached G's from gfree list to the global list.
-static void
-gfpurge(P *p)
-{
- G *gp;
-
- runtime·lock(&runtime·sched.gflock);
- while(p->gfreecnt != 0) {
- p->gfreecnt--;
- gp = p->gfree;
- p->gfree = gp->schedlink;
- gp->schedlink = runtime·sched.gfree;
- runtime·sched.gfree = gp;
- runtime·sched.ngfree++;
- }
- runtime·unlock(&runtime·sched.gflock);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·Breakpoint(void)
-{
- runtime·breakpoint();
-}
-
-// lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
-// after they modify m->locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-#pragma textflag NOSPLIT
-static void
-lockOSThread(void)
-{
- g->m->lockedg = g;
- g->lockedm = g->m;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·LockOSThread(void)
-{
- g->m->locked |= LockExternal;
- lockOSThread();
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·lockOSThread(void)
-{
- g->m->locked += LockInternal;
- lockOSThread();
-}
-
-
-// unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
-// after they update m->locked. Do not allow preemption during this call,
-// or else the m might be different in this function than in the caller.
-#pragma textflag NOSPLIT
-static void
-unlockOSThread(void)
-{
- if(g->m->locked != 0)
- return;
- g->m->lockedg = nil;
- g->lockedm = nil;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·UnlockOSThread(void)
-{
- g->m->locked &= ~LockExternal;
- unlockOSThread();
-}
-
-static void badunlockOSThread(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·unlockOSThread(void)
-{
- void (*fn)(void);
-
- if(g->m->locked < LockInternal) {
- fn = badunlockOSThread;
- runtime·onM(&fn);
- }
- g->m->locked -= LockInternal;
- unlockOSThread();
-}
-
-static void
-badunlockOSThread(void)
-{
- runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
-}
-
-#pragma textflag NOSPLIT
-int32
-runtime·gcount(void)
-{
- P *p, **pp;
- int32 n;
-
- n = runtime·allglen - runtime·sched.ngfree;
- for(pp=runtime·allp; p=*pp; pp++)
- n -= p->gfreecnt;
- // All these variables can be changed concurrently, so the result can be inconsistent.
- // But at least the current goroutine is running.
- if(n < 1)
- n = 1;
- return n;
-}
-
-int32
-runtime·mcount(void)
-{
- return runtime·sched.mcount;
-}
-
-static struct ProfState {
- uint32 lock;
- int32 hz;
-} prof;
-
-static void System(void) { System(); }
-static void ExternalCode(void) { ExternalCode(); }
-static void GC(void) { GC(); }
-
-extern void runtime·cpuproftick(uintptr*, int32);
-extern byte runtime·etext[];
-
-// Called if we receive a SIGPROF signal.
-void
-runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
-{
- int32 n;
- bool traceback;
- // Do not use the global m in this function; use mp instead.
- // On Windows, one m sends reports about all the g's, so the global m would be the wrong one.
- byte m;
- uintptr stk[100];
-
- m = 0;
- USED(m);
-
- if(prof.hz == 0)
- return;
-
- // Profiling runs concurrently with GC, so it must not allocate.
- mp->mallocing++;
-
- // Define that a "user g" is a user-created goroutine, and a "system g"
- // is one that is m->g0 or m->gsignal. We've only made sure that we
- // can unwind user g's, so exclude the system g's.
- //
- // It is not quite as easy as testing gp == m->curg (the current user g)
- // because we might be interrupted for profiling halfway through a
- // goroutine switch. The switch involves updating three (or four) values:
- // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
- // because once it gets updated the new g is running.
- //
- // When switching from a user g to a system g, LR is not considered live,
- // so the update only affects g, SP, and PC. Since PC must be last,
- // the possible partial transitions in ordinary execution are (1) g alone is updated,
- // (2) both g and SP are updated, and (3) SP alone is updated.
- // If g is updated, we'll see a system g and not look closer.
- // If SP alone is updated, we can detect the partial transition by checking
- // whether the SP is within g's stack bounds. (We could also require that SP
- // be changed only after g, but the stack bounds check is needed by other
- // cases, so there is no need to impose an additional requirement.)
- //
- // There is one exceptional transition to a system g, not in ordinary execution.
- // When a signal arrives, the operating system starts the signal handler running
- // with an updated PC and SP. The g is updated last, at the beginning of the
- // handler. There are two reasons this is okay. First, until g is updated the
- // g and SP do not match, so the stack bounds check detects the partial transition.
- // Second, signal handlers currently run with signals disabled, so a profiling
- // signal cannot arrive during the handler.
- //
- // When switching from a system g to a user g, there are three possibilities.
- //
- // First, it may be that the g switch has no PC update, because the SP
- // either corresponds to a user g throughout (as in runtime.asmcgocall)
- // or because it has been arranged to look like a user g frame
- // (as in runtime.cgocallback_gofunc). In this case, since the entire
- // transition is a g+SP update, a partial transition updating just one of
- // those will be detected by the stack bounds check.
- //
- // Second, when returning from a signal handler, the PC and SP updates
- // are performed by the operating system in an atomic update, so the g
- // update must be done before them. The stack bounds check detects
- // the partial transition here, and (again) signal handlers run with signals
- // disabled, so a profiling signal cannot arrive then anyway.
- //
- // Third, the common case: it may be that the switch updates g, SP, and PC
- // separately, as in runtime.gogo.
- //
- // Because runtime.gogo is the only instance, we check whether the PC lies
- // within that function, and if so, do not ask for a traceback. This approach
- // requires knowing the size of the runtime.gogo function, which we
- // record in arch_*.h and check in runtime_test.go.
- //
- // There is another apparently viable approach, recorded here in case
- // the "PC within runtime.gogo" check turns out not to be usable.
- // It would be possible to delay the update of either g or SP until immediately
- // before the PC update instruction. Then, because of the stack bounds check,
- // the only problematic interrupt point is just before that PC update instruction,
- // and the sigprof handler can detect that instruction and simulate stepping past
- // it in order to reach a consistent state. On ARM, the update of g must be made
- // in two places (in R10 and also in a TLS slot), so the delayed update would
- // need to be the SP update. The sigprof handler must read the instruction at
- // the current PC and if it was the known instruction (for example, JMP BX or
- // MOV R2, PC), use that other register in place of the PC value.
- // The biggest drawback to this solution is that it requires that we can tell
- // whether it's safe to read from the memory pointed at by PC.
- // In a correct program, we can test PC == nil and otherwise read,
- // but if a profiling signal happens at the instant that a program executes
- // a bad jump (before the program manages to handle the resulting fault)
- // the profiling handler could fault trying to read nonexistent memory.
- //
- // To recap, there are no constraints on the assembly being used for the
- // transition. We simply require that g and SP match and that the PC is not
- // in runtime.gogo.
- traceback = true;
- if(gp == nil || gp != mp->curg ||
- (uintptr)sp < gp->stack.lo || gp->stack.hi < (uintptr)sp ||
- ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
- traceback = false;
-
- n = 0;
- if(traceback)
- n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, TraceTrap);
- if(!traceback || n <= 0) {
- // Normal traceback is impossible or has failed.
- // See if it falls into several common cases.
- n = 0;
- if(mp->ncgo > 0 && mp->curg != nil &&
- mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
- // Cgo, we can't unwind and symbolize arbitrary C code,
- // so instead collect Go stack that leads to the cgo call.
- // This is especially important on windows, since all syscalls are cgo calls.
- n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, 0);
- }
-#ifdef GOOS_windows
- if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
- // Libcall, i.e. runtime syscall on windows.
- // Collect Go stack that leads to the call.
- n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, 0);
- }
-#endif
- if(n == 0) {
- // If all of the above has failed, account it against abstract "System" or "GC".
- n = 2;
- // "ExternalCode" is better than "etext".
- if((uintptr)pc > (uintptr)runtime·etext)
- pc = (byte*)ExternalCode + PCQuantum;
- stk[0] = (uintptr)pc;
- if(mp->gcing || mp->helpgc)
- stk[1] = (uintptr)GC + PCQuantum;
- else
- stk[1] = (uintptr)System + PCQuantum;
- }
- }
-
- if(prof.hz != 0) {
- // Simple cas-lock to coordinate with setcpuprofilerate.
- while(!runtime·cas(&prof.lock, 0, 1))
- runtime·osyield();
- if(prof.hz != 0)
- runtime·cpuproftick(stk, n);
- runtime·atomicstore(&prof.lock, 0);
- }
- mp->mallocing--;
-}
-
-// Arrange to call fn with a traceback hz times a second.
-void
-runtime·setcpuprofilerate_m(void)
-{
- int32 hz;
-
- hz = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
-
- // Force sane arguments.
- if(hz < 0)
- hz = 0;
-
- // Disable preemption, otherwise we can be rescheduled to another thread
- // that has profiling enabled.
- g->m->locks++;
-
- // Stop the profiler on this thread so that it is safe to lock prof.
- // If a profiling signal came in while we had prof locked,
- // it would deadlock.
- runtime·resetcpuprofiler(0);
-
- while(!runtime·cas(&prof.lock, 0, 1))
- runtime·osyield();
- prof.hz = hz;
- runtime·atomicstore(&prof.lock, 0);
-
- runtime·lock(&runtime·sched.lock);
- runtime·sched.profilehz = hz;
- runtime·unlock(&runtime·sched.lock);
-
- if(hz != 0)
- runtime·resetcpuprofiler(hz);
-
- g->m->locks--;
-}
-
-P *runtime·newP(void);
-
-// Change number of processors. The world is stopped, sched is locked.
-// gcworkbufs are not being modified by either the GC or
-// the write barrier code.
-static void
-procresize(int32 new)
-{
- int32 i, old;
- bool empty;
- G *gp;
- P *p;
-
- old = runtime·gomaxprocs;
- if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs)
- runtime·throw("procresize: invalid arg");
- // initialize new P's
- for(i = 0; i < new; i++) {
- p = runtime·allp[i];
- if(p == nil) {
- p = runtime·newP();
- p->id = i;
- p->status = Pgcstop;
- runtime·atomicstorep(&runtime·allp[i], p);
- }
- if(p->mcache == nil) {
- if(old==0 && i==0)
- p->mcache = g->m->mcache; // bootstrap
- else
- p->mcache = runtime·allocmcache();
- }
- }
-
- // redistribute runnable G's evenly
- // collect all runnable goroutines in global queue preserving FIFO order
- // FIFO order is required to ensure fairness even during frequent GCs
- // see http://golang.org/issue/7126
- empty = false;
- while(!empty) {
- empty = true;
- for(i = 0; i < old; i++) {
- p = runtime·allp[i];
- if(p->runqhead == p->runqtail)
- continue;
- empty = false;
- // pop from tail of local queue
- p->runqtail--;
- gp = p->runq[p->runqtail%nelem(p->runq)];
- // push onto head of global queue
- gp->schedlink = runtime·sched.runqhead;
- runtime·sched.runqhead = gp;
- if(runtime·sched.runqtail == nil)
- runtime·sched.runqtail = gp;
- runtime·sched.runqsize++;
- }
- }
- // fill local queues with at most nelem(p->runq)/2 goroutines
- // start at 1 because current M already executes some G and will acquire allp[0] below,
- // so if we have a spare G we want to put it into allp[1].
- for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
- gp = runtime·sched.runqhead;
- runtime·sched.runqhead = gp->schedlink;
- if(runtime·sched.runqhead == nil)
- runtime·sched.runqtail = nil;
- runtime·sched.runqsize--;
- runqput(runtime·allp[i%new], gp);
- }
-
- // free unused P's
- for(i = new; i < old; i++) {
- p = runtime·allp[i];
- runtime·freemcache(p->mcache);
- p->mcache = nil;
- gfpurge(p);
- p->status = Pdead;
- // can't free P itself because it can be referenced by an M in syscall
- }
-
- if(g->m->p)
- g->m->p->m = nil;
- g->m->p = nil;
- g->m->mcache = nil;
- p = runtime·allp[0];
- p->m = nil;
- p->status = Pidle;
- acquirep(p);
- for(i = new-1; i > 0; i--) {
- p = runtime·allp[i];
- p->status = Pidle;
- pidleput(p);
- }
- runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
-}
-
-// Associate p and the current m.
-static void
-acquirep(P *p)
-{
- if(g->m->p || g->m->mcache)
- runtime·throw("acquirep: already in go");
- if(p->m || p->status != Pidle) {
- runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
- runtime·throw("acquirep: invalid p state");
- }
- g->m->mcache = p->mcache;
- g->m->p = p;
- p->m = g->m;
- p->status = Prunning;
-}
-
-// Disassociate p and the current m.
-static P*
-releasep(void)
-{
- P *p;
-
- if(g->m->p == nil || g->m->mcache == nil)
- runtime·throw("releasep: invalid arg");
- p = g->m->p;
- if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
- runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
- g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
- runtime·throw("releasep: invalid p state");
- }
- g->m->p = nil;
- g->m->mcache = nil;
- p->m = nil;
- p->status = Pidle;
- return p;
-}
-
-static void
-incidlelocked(int32 v)
-{
- runtime·lock(&runtime·sched.lock);
- runtime·sched.nmidlelocked += v;
- if(v > 0)
- checkdead();
- runtime·unlock(&runtime·sched.lock);
-}
-
-// Check for a deadlock situation.
-// The check is based on the number of running M's; if it is 0, the program is deadlocked.
-static void
-checkdead(void)
-{
- G *gp;
- P *p;
- M *mp;
- int32 run, grunning, s;
- uintptr i;
-
- // -1 for sysmon
- run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
- if(run > 0)
- return;
- // If we are dying because of a signal caught on an already idle thread,
- // freezetheworld will cause all running threads to block.
-// And the runtime will essentially enter a deadlock state,
- // except that there is a thread that will call runtime·exit soon.
- if(runtime·panicking > 0)
- return;
- if(run < 0) {
- runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
- runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
- runtime·throw("checkdead: inconsistent counts");
- }
- grunning = 0;
- runtime·lock(&runtime·allglock);
- for(i = 0; i < runtime·allglen; i++) {
- gp = runtime·allg[i];
- if(gp->issystem)
- continue;
- s = runtime·readgstatus(gp);
- switch(s&~Gscan) {
- case Gwaiting:
- grunning++;
- break;
- case Grunnable:
- case Grunning:
- case Gsyscall:
- runtime·unlock(&runtime·allglock);
- runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
- runtime·throw("checkdead: runnable g");
- break;
- }
- }
- runtime·unlock(&runtime·allglock);
- if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
- runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
-
- // Maybe jump time forward for playground.
- if((gp = runtime·timejump()) != nil) {
- runtime·casgstatus(gp, Gwaiting, Grunnable);
- globrunqput(gp);
- p = pidleget();
- if(p == nil)
- runtime·throw("checkdead: no p for timer");
- mp = mget();
- if(mp == nil)
- newm(nil, p);
- else {
- mp->nextp = p;
- runtime·notewakeup(&mp->park);
- }
- return;
- }
-
- g->m->throwing = -1; // do not dump full stacks
- runtime·throw("all goroutines are asleep - deadlock!");
-}
-
-static void
-sysmon(void)
-{
- uint32 idle, delay, nscavenge;
- int64 now, unixnow, lastpoll, lasttrace, lastgc;
- int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
- G *gp;
-
- // If we go two minutes without a garbage collection, force one to run.
- forcegcperiod = 2*60*1e9;
- // If a heap span goes unused for 5 minutes after a garbage collection,
- // we hand it back to the operating system.
- scavengelimit = 5*60*1e9;
- if(runtime·debug.scavenge > 0) {
- // Scavenge-a-lot for testing.
- forcegcperiod = 10*1e6;
- scavengelimit = 20*1e6;
- }
- lastscavenge = runtime·nanotime();
- nscavenge = 0;
- // Make wake-up period small enough for the sampling to be correct.
- maxsleep = forcegcperiod/2;
- if(scavengelimit < forcegcperiod)
- maxsleep = scavengelimit/2;
-
- lasttrace = 0;
- idle = 0; // how many cycles in succession we have gone without waking anybody up
- delay = 0;
- for(;;) {
- if(idle == 0) // start with 20us sleep...
- delay = 20;
- else if(idle > 50) // start doubling the sleep after 1ms...
- delay *= 2;
- if(delay > 10*1000) // up to 10ms
- delay = 10*1000;
- runtime·usleep(delay);
- if(runtime·debug.schedtrace <= 0 &&
- (runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) { // TODO: fast atomic
- runtime·lock(&runtime·sched.lock);
- if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
- runtime·atomicstore(&runtime·sched.sysmonwait, 1);
- runtime·unlock(&runtime·sched.lock);
- runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
- runtime·lock(&runtime·sched.lock);
- runtime·atomicstore(&runtime·sched.sysmonwait, 0);
- runtime·noteclear(&runtime·sched.sysmonnote);
- idle = 0;
- delay = 20;
- }
- runtime·unlock(&runtime·sched.lock);
- }
- // poll network if not polled for more than 10ms
- lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
- now = runtime·nanotime();
- unixnow = runtime·unixnanotime();
- if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
- runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
- gp = runtime·netpoll(false); // non-blocking
- if(gp) {
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before injectglist.
- // Otherwise it can lead to the following situation:
- // injectglist grabs all P's but before it starts M's to run the P's,
- // another M returns from syscall, finishes running its G,
- // observes that there is no work to do and no other running M's
- // and reports deadlock.
- incidlelocked(-1);
- injectglist(gp);
- incidlelocked(1);
- }
- }
- // retake P's blocked in syscalls
- // and preempt long running G's
- if(retake(now))
- idle = 0;
- else
- idle++;
-
- // check if we need to force a GC
- lastgc = runtime·atomicload64(&mstats.last_gc);
- if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
- runtime·lock(&runtime·forcegc.lock);
- runtime·forcegc.idle = 0;
- runtime·forcegc.g->schedlink = nil;
- injectglist(runtime·forcegc.g);
- runtime·unlock(&runtime·forcegc.lock);
- }
-
- // scavenge heap once in a while
- if(lastscavenge + scavengelimit/2 < now) {
- runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
- lastscavenge = now;
- nscavenge++;
- }
-
- if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
- lasttrace = now;
- runtime·schedtrace(runtime·debug.scheddetail);
- }
- }
-}
-
-typedef struct Pdesc Pdesc;
-struct Pdesc
-{
- uint32 schedtick;
- int64 schedwhen;
- uint32 syscalltick;
- int64 syscallwhen;
-};
-#pragma dataflag NOPTR
-static Pdesc pdesc[MaxGomaxprocs];
-
-static uint32
-retake(int64 now)
-{
- uint32 i, s, n;
- int64 t;
- P *p;
- Pdesc *pd;
-
- n = 0;
- for(i = 0; i < runtime·gomaxprocs; i++) {
- p = runtime·allp[i];
- if(p==nil)
- continue;
- pd = &pdesc[i];
- s = p->status;
- if(s == Psyscall) {
- // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
- t = p->syscalltick;
- if(pd->syscalltick != t) {
- pd->syscalltick = t;
- pd->syscallwhen = now;
- continue;
- }
- // On the one hand we don't want to retake Ps if there is no other work to do,
- // but on the other hand we want to retake them eventually
- // because they can prevent the sysmon thread from deep sleep.
- if(p->runqhead == p->runqtail &&
- runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
- pd->syscallwhen + 10*1000*1000 > now)
- continue;
- // Need to decrement number of idle locked M's
- // (pretending that one more is running) before the CAS.
- // Otherwise the M from which we retake can exit the syscall,
- // increment nmidle and report deadlock.
- incidlelocked(-1);
- if(runtime·cas(&p->status, s, Pidle)) {
- n++;
- handoffp(p);
- }
- incidlelocked(1);
- } else if(s == Prunning) {
- // Preempt G if it's running for more than 10ms.
- t = p->schedtick;
- if(pd->schedtick != t) {
- pd->schedtick = t;
- pd->schedwhen = now;
- continue;
- }
- if(pd->schedwhen + 10*1000*1000 > now)
- continue;
- preemptone(p);
- }
- }
- return n;
-}
-
-// Tell all goroutines that they have been preempted and they should stop.
-// This function is purely best-effort. It can fail to inform a goroutine if a
-// processor just started running it.
-// No locks need to be held.
-// Returns true if preemption request was issued to at least one goroutine.
-static bool
-preemptall(void)
-{
- P *p;
- int32 i;
- bool res;
-
- res = false;
- for(i = 0; i < runtime·gomaxprocs; i++) {
- p = runtime·allp[i];
- if(p == nil || p->status != Prunning)
- continue;
- res |= preemptone(p);
- }
- return res;
-}
-
-// Tell the goroutine running on processor P to stop.
-// This function is purely best-effort. It can incorrectly fail to inform the
-// goroutine. It can inform the wrong goroutine. Even if it informs the
-// correct goroutine, that goroutine might ignore the request if it is
-// simultaneously executing runtime·newstack.
-// No lock needs to be held.
-// Returns true if a preemption request was issued.
-// The actual preemption will happen at some point in the future
-// and will be indicated by gp->status no longer being
-// Grunning.
-static bool
-preemptone(P *p)
-{
- M *mp;
- G *gp;
-
- mp = p->m;
- if(mp == nil || mp == g->m)
- return false;
- gp = mp->curg;
- if(gp == nil || gp == mp->g0)
- return false;
- gp->preempt = true;
- // Every call in a go routine checks for stack overflow by
- // comparing the current stack pointer to gp->stackguard0.
- // Setting gp->stackguard0 to StackPreempt folds
- // preemption into the normal stack overflow check.
- gp->stackguard0 = StackPreempt;
- return true;
-}
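
The comment above explains that preemption is folded into the stack-overflow check: setting gp->stackguard0 to StackPreempt makes the next function prologue check trip and divert into the runtime. As a purely illustrative sketch (editor's addition, with invented names and none of the runtime's actual prologue machinery), the same cooperative idea looks like this in ordinary Go: the requester only sets a flag, and the worker notices it at its own check points.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// preempt stands in for gp->preempt / stackguard0: the requester sets it,
// and the worker notices it only at points where it already checks.
var preempt atomic.Bool

func worker(done chan<- int) {
	n := 0
	for {
		if preempt.Load() { // analogous to the prologue's stack check
			done <- n
			return
		}
		n++ // "useful work"
	}
}

func main() {
	done := make(chan int)
	go worker(done)
	time.Sleep(10 * time.Millisecond)
	preempt.Store(true) // best-effort request, like preemptone
	fmt.Println("iterations before the request was seen:", <-done)
}
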
-
-void
-runtime·schedtrace(bool detailed)
-{
- static int64 starttime;
- int64 now;
- int64 id1, id2, id3;
- int32 i, t, h;
- uintptr gi;
- int8 *fmt;
- M *mp, *lockedm;
- G *gp, *lockedg;
- P *p;
-
- now = runtime·nanotime();
- if(starttime == 0)
- starttime = now;
-
- runtime·lock(&runtime·sched.lock);
- runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
- (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
- runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
- if(detailed) {
- runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
- runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
- runtime·sched.stopwait, runtime·sched.sysmonwait);
- }
- // We must be careful while reading data from P's, M's and G's.
- // Even if we hold schedlock, most data can be changed concurrently.
- // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
- for(i = 0; i < runtime·gomaxprocs; i++) {
- p = runtime·allp[i];
- if(p == nil)
- continue;
- mp = p->m;
- h = runtime·atomicload(&p->runqhead);
- t = runtime·atomicload(&p->runqtail);
- if(detailed)
- runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
- i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
- else {
- // In non-detailed mode format lengths of per-P run queues as:
- // [len1 len2 len3 len4]
- fmt = " %d";
- if(runtime·gomaxprocs == 1)
- fmt = " [%d]\n";
- else if(i == 0)
- fmt = " [%d";
- else if(i == runtime·gomaxprocs-1)
- fmt = " %d]\n";
- runtime·printf(fmt, t-h);
- }
- }
- if(!detailed) {
- runtime·unlock(&runtime·sched.lock);
- return;
- }
- for(mp = runtime·allm; mp; mp = mp->alllink) {
- p = mp->p;
- gp = mp->curg;
- lockedg = mp->lockedg;
- id1 = -1;
- if(p)
- id1 = p->id;
- id2 = -1;
- if(gp)
- id2 = gp->goid;
- id3 = -1;
- if(lockedg)
- id3 = lockedg->goid;
- runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
- " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
- mp->id, id1, id2,
- mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
- mp->spinning, g->m->blocked, id3);
- }
- runtime·lock(&runtime·allglock);
- for(gi = 0; gi < runtime·allglen; gi++) {
- gp = runtime·allg[gi];
- mp = gp->m;
- lockedm = gp->lockedm;
- runtime·printf(" G%D: status=%d(%S) m=%d lockedm=%d\n",
- gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
- lockedm ? lockedm->id : -1);
- }
- runtime·unlock(&runtime·allglock);
- runtime·unlock(&runtime·sched.lock);
-}
-
-// Put mp on midle list.
-// Sched must be locked.
-static void
-mput(M *mp)
-{
- mp->schedlink = runtime·sched.midle;
- runtime·sched.midle = mp;
- runtime·sched.nmidle++;
- checkdead();
-}
-
-// Try to get an m from midle list.
-// Sched must be locked.
-static M*
-mget(void)
-{
- M *mp;
-
- if((mp = runtime·sched.midle) != nil){
- runtime·sched.midle = mp->schedlink;
- runtime·sched.nmidle--;
- }
- return mp;
-}
-
-// Put gp on the global runnable queue.
-// Sched must be locked.
-static void
-globrunqput(G *gp)
-{
- gp->schedlink = nil;
- if(runtime·sched.runqtail)
- runtime·sched.runqtail->schedlink = gp;
- else
- runtime·sched.runqhead = gp;
- runtime·sched.runqtail = gp;
- runtime·sched.runqsize++;
-}
-
-// Put a batch of runnable goroutines on the global runnable queue.
-// Sched must be locked.
-static void
-globrunqputbatch(G *ghead, G *gtail, int32 n)
-{
- gtail->schedlink = nil;
- if(runtime·sched.runqtail)
- runtime·sched.runqtail->schedlink = ghead;
- else
- runtime·sched.runqhead = ghead;
- runtime·sched.runqtail = gtail;
- runtime·sched.runqsize += n;
-}
-
-// Try to get a batch of G's from the global runnable queue.
-// Sched must be locked.
-static G*
-globrunqget(P *p, int32 max)
-{
- G *gp, *gp1;
- int32 n;
-
- if(runtime·sched.runqsize == 0)
- return nil;
- n = runtime·sched.runqsize/runtime·gomaxprocs+1;
- if(n > runtime·sched.runqsize)
- n = runtime·sched.runqsize;
- if(max > 0 && n > max)
- n = max;
- if(n > nelem(p->runq)/2)
- n = nelem(p->runq)/2;
- runtime·sched.runqsize -= n;
- if(runtime·sched.runqsize == 0)
- runtime·sched.runqtail = nil;
- gp = runtime·sched.runqhead;
- runtime·sched.runqhead = gp->schedlink;
- n--;
- while(n--) {
- gp1 = runtime·sched.runqhead;
- runtime·sched.runqhead = gp1->schedlink;
- runqput(p, gp1);
- }
- return gp;
-}
-
-// Put p on the pidle list.
-// Sched must be locked.
-static void
-pidleput(P *p)
-{
- p->link = runtime·sched.pidle;
- runtime·sched.pidle = p;
- runtime·xadd(&runtime·sched.npidle, 1); // TODO: fast atomic
-}
-
-// Try to get a p from the pidle list.
-// Sched must be locked.
-static P*
-pidleget(void)
-{
- P *p;
-
- p = runtime·sched.pidle;
- if(p) {
- runtime·sched.pidle = p->link;
- runtime·xadd(&runtime·sched.npidle, -1); // TODO: fast atomic
- }
- return p;
-}
-
-// Try to put g on local runnable queue.
-// If it's full, put onto global queue.
-// Executed only by the owner P.
-static void
-runqput(P *p, G *gp)
-{
- uint32 h, t;
-
-retry:
- h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
- t = p->runqtail;
- if(t - h < nelem(p->runq)) {
- p->runq[t%nelem(p->runq)] = gp;
- runtime·atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
- return;
- }
- if(runqputslow(p, gp, h, t))
- return;
- // the queue is not full, now the put above must succeed
- goto retry;
-}
-
-// Put g and a batch of work from local runnable queue on global queue.
-// Executed only by the owner P.
-static bool
-runqputslow(P *p, G *gp, uint32 h, uint32 t)
-{
- G *batch[nelem(p->runq)/2+1];
- uint32 n, i;
-
- // First, grab a batch from local queue.
- n = t-h;
- n = n/2;
- if(n != nelem(p->runq)/2)
- runtime·throw("runqputslow: queue is not full");
- for(i=0; i<n; i++)
- batch[i] = p->runq[(h+i)%nelem(p->runq)];
- if(!runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
- return false;
- batch[n] = gp;
- // Link the goroutines.
- for(i=0; i<n; i++)
- batch[i]->schedlink = batch[i+1];
- // Now put the batch on global queue.
- runtime·lock(&runtime·sched.lock);
- globrunqputbatch(batch[0], batch[n], n+1);
- runtime·unlock(&runtime·sched.lock);
- return true;
-}
-
-// Get g from local runnable queue.
-// Executed only by the owner P.
-static G*
-runqget(P *p)
-{
- G *gp;
- uint32 t, h;
-
- for(;;) {
- h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
- t = p->runqtail;
- if(t == h)
- return nil;
- gp = p->runq[h%nelem(p->runq)];
- if(runtime·cas(&p->runqhead, h, h+1)) // cas-release, commits consume
- return gp;
- }
-}
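
The runqput/runqget pair above implements a fixed-size ring with a single producer (the owner P) and CAS-ing consumers, using the load-acquire / store-release pattern called out in the comments. Below is a minimal stand-alone sketch of that shape in Go (editor's illustration only; names and sizes are made up, and the real runtime additionally spills half the queue to the global run queue when full).

package main

import (
	"fmt"
	"sync/atomic"
)

const qsize = 256

type ring struct {
	head uint32 // advanced by any consumer via CAS
	tail uint32 // written only by the owning producer
	buf  [qsize]int
}

// put appends v; it returns false when the ring is full.
func (r *ring) put(v int) bool {
	h := atomic.LoadUint32(&r.head) // load-acquire: synchronize with consumers
	t := r.tail
	if t-h >= qsize {
		return false // full; the runtime would push a batch to the global queue here
	}
	r.buf[t%qsize] = v
	atomic.StoreUint32(&r.tail, t+1) // store-release: make the item visible
	return true
}

// get pops one element, or returns ok=false when the ring is empty.
func (r *ring) get() (v int, ok bool) {
	for {
		h := atomic.LoadUint32(&r.head)
		t := atomic.LoadUint32(&r.tail)
		if t == h {
			return 0, false
		}
		v = r.buf[h%qsize]
		if atomic.CompareAndSwapUint32(&r.head, h, h+1) { // commit the consume
			return v, true
		}
	}
}

func main() {
	var r ring
	for i := 0; i < 5; i++ {
		r.put(i)
	}
	for v, ok := r.get(); ok; v, ok = r.get() {
		fmt.Println(v)
	}
}
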
-
-// Grabs a batch of goroutines from local runnable queue.
-// batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
-// Can be executed by any P.
-static uint32
-runqgrab(P *p, G **batch)
-{
- uint32 t, h, n, i;
-
- for(;;) {
- h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
- t = runtime·atomicload(&p->runqtail); // load-acquire, synchronize with the producer
- n = t-h;
- n = n - n/2;
- if(n == 0)
- break;
- if(n > nelem(p->runq)/2) // read inconsistent h and t
- continue;
- for(i=0; i<n; i++)
- batch[i] = p->runq[(h+i)%nelem(p->runq)];
- if(runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
- break;
- }
- return n;
-}
-
-// Steal half of elements from local runnable queue of p2
-// and put onto local runnable queue of p.
-// Returns one of the stolen elements (or nil if failed).
-static G*
-runqsteal(P *p, P *p2)
-{
- G *gp;
- G *batch[nelem(p->runq)/2];
- uint32 t, h, n, i;
-
- n = runqgrab(p2, batch);
- if(n == 0)
- return nil;
- n--;
- gp = batch[n];
- if(n == 0)
- return gp;
- h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
- t = p->runqtail;
- if(t - h + n >= nelem(p->runq))
- runtime·throw("runqsteal: runq overflow");
- for(i=0; i<n; i++, t++)
- p->runq[t%nelem(p->runq)] = batch[i];
- runtime·atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
- return gp;
-}
-
-void
-runtime·testSchedLocalQueue(void)
-{
- P *p;
- G *gs;
- int32 i, j;
-
- p = (P*)runtime·mallocgc(sizeof(*p), nil, FlagNoScan);
- gs = (G*)runtime·mallocgc(nelem(p->runq)*sizeof(*gs), nil, FlagNoScan);
-
- for(i = 0; i < nelem(p->runq); i++) {
- if(runqget(p) != nil)
- runtime·throw("runq is not empty initially");
- for(j = 0; j < i; j++)
- runqput(p, &gs[i]);
- for(j = 0; j < i; j++) {
- if(runqget(p) != &gs[i]) {
- runtime·printf("bad element at iter %d/%d\n", i, j);
- runtime·throw("bad element");
- }
- }
- if(runqget(p) != nil)
- runtime·throw("runq is not empty afterwards");
- }
-}
-
-void
-runtime·testSchedLocalQueueSteal(void)
-{
- P *p1, *p2;
- G *gs, *gp;
- int32 i, j, s;
-
- p1 = (P*)runtime·mallocgc(sizeof(*p1), nil, FlagNoScan);
- p2 = (P*)runtime·mallocgc(sizeof(*p2), nil, FlagNoScan);
- gs = (G*)runtime·mallocgc(nelem(p1->runq)*sizeof(*gs), nil, FlagNoScan);
-
- for(i = 0; i < nelem(p1->runq); i++) {
- for(j = 0; j < i; j++) {
- gs[j].sig = 0;
- runqput(p1, &gs[j]);
- }
- gp = runqsteal(p2, p1);
- s = 0;
- if(gp) {
- s++;
- gp->sig++;
- }
- while(gp = runqget(p2)) {
- s++;
- gp->sig++;
- }
- while(gp = runqget(p1))
- gp->sig++;
- for(j = 0; j < i; j++) {
- if(gs[j].sig != 1) {
- runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
- runtime·throw("bad element");
- }
- }
- if(s != i/2 && s != i/2+1) {
- runtime·printf("bad steal %d, want %d or %d, iter %d\n",
- s, i/2, i/2+1, i);
- runtime·throw("bad steal");
- }
- }
-}
-
-void
-runtime·setmaxthreads_m(void)
-{
- int32 in;
- int32 out;
-
- in = g->m->scalararg[0];
-
- runtime·lock(&runtime·sched.lock);
- out = runtime·sched.maxmcount;
- runtime·sched.maxmcount = in;
- checkmcount();
- runtime·unlock(&runtime·sched.lock);
-
- g->m->scalararg[0] = out;
-}
-
-static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
-
-static bool
-haveexperiment(int8 *name)
-{
- int32 i, j;
-
- for(i=0; i<sizeof(experiment); i++) {
- if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
- for(j=0; name[j]; j++)
- if(experiment[i+j] != name[j])
- goto nomatch;
- if(experiment[i+j] != '\0' && experiment[i+j] != ',')
- goto nomatch;
- return 1;
- }
- nomatch:;
- }
- return 0;
-}
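
haveexperiment above scans the comma-separated GOEXPERIMENT string by hand because it is plain C with no library support. For comparison only (editor's sketch, not part of the change), the same membership test written in ordinary Go with the standard library:

package main

import (
	"fmt"
	"strings"
)

// haveExperiment reports whether name appears in the comma-separated list.
func haveExperiment(experiments, name string) bool {
	for _, e := range strings.Split(experiments, ",") {
		if e == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(haveExperiment("framepointer,precisestack", "precisestack")) // true
	fmt.Println(haveExperiment("framepointer,precisestack", "stack"))        // false
}
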
-
-#pragma textflag NOSPLIT
-void
-sync·runtime_procPin(intptr p)
-{
- M *mp;
-
- mp = g->m;
- // Disable preemption.
- mp->locks++;
- p = mp->p->id;
- FLUSH(&p);
-}
-
-#pragma textflag NOSPLIT
-void
-sync·runtime_procUnpin()
-{
- g->m->locks--;
-}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index f41ffbff3..12e2e71e9 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -6,8 +6,6 @@ package runtime
import "unsafe"
-func newsysmon()
-
func runtime_init()
func main_init()
func main_main()
@@ -29,7 +27,7 @@ func main() {
maxstacksize = 250000000
}
- onM(newsysmon)
+ systemstack(newsysmon)
// Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few
@@ -55,6 +53,24 @@ func main() {
memstats.enablegc = true // now that runtime is initialized, GC is okay
+ if iscgo {
+ if _cgo_thread_start == nil {
+ gothrow("_cgo_thread_start missing")
+ }
+ if _cgo_malloc == nil {
+ gothrow("_cgo_malloc missing")
+ }
+ if _cgo_free == nil {
+ gothrow("_cgo_free missing")
+ }
+ if _cgo_setenv == nil {
+ gothrow("_cgo_setenv missing")
+ }
+ if _cgo_unsetenv == nil {
+ gothrow("_cgo_unsetenv missing")
+ }
+ }
+
main_init()
needUnlock = false
@@ -80,8 +96,6 @@ func main() {
}
}
-var parkunlock_c byte
-
// start forcegc helper goroutine
func init() {
go forcegchelper()
@@ -115,7 +129,7 @@ func Gosched() {
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
-func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
mp := acquirem()
gp := mp.curg
status := readgstatus(gp)
@@ -123,7 +137,7 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
gothrow("gopark: bad g status")
}
mp.waitlock = lock
- mp.waitunlockf = unlockf
+ mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
gp.waitreason = reason
releasem(mp)
// can't do anything that might move the G between Ms here.
@@ -133,14 +147,13 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string) {
- gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason)
+ gopark(parkunlock_c, unsafe.Pointer(lock), reason)
}
func goready(gp *g) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(gp)
- onM(ready_m)
- releasem(mp)
+ systemstack(func() {
+ ready(gp)
+ })
}
//go:nosplit
@@ -226,6 +239,11 @@ func newG() *g {
return new(g)
}
+var (
+ allgs []*g
+ allglock mutex
+)
+
func allgadd(gp *g) {
if readgstatus(gp) == _Gidle {
gothrow("allgadd: bad status Gidle")
diff --git a/src/runtime/proc1.go b/src/runtime/proc1.go
new file mode 100644
index 000000000..8c941dd35
--- /dev/null
+++ b/src/runtime/proc1.go
@@ -0,0 +1,3186 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+var (
+ m0 m
+ g0 g
+)
+
+// Goroutine scheduler
+// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
+//
+// The main concepts are:
+// G - goroutine.
+// M - worker thread, or machine.
+// P - processor, a resource that is required to execute Go code.
+// M must have an associated P to execute Go code; however, it can be
+// blocked or in a syscall without an associated P.
+//
+// Design doc at http://golang.org/s/go11sched.
+
+const (
+ // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
+ // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
+ _GoidCacheBatch = 16
+)
+
+/*
+SchedT sched;
+int32 gomaxprocs;
+uint32 needextram;
+bool iscgo;
+M m0;
+G g0; // idle goroutine for m0
+G* lastg;
+M* allm;
+M* extram;
+P* allp[MaxGomaxprocs+1];
+int8* goos;
+int32 ncpu;
+int32 newprocs;
+
+Mutex allglock; // the following vars are protected by this lock or by stoptheworld
+G** allg;
+Slice allgs;
+uintptr allglen;
+ForceGCState forcegc;
+
+void mstart(void);
+static void runqput(P*, G*);
+static G* runqget(P*);
+static bool runqputslow(P*, G*, uint32, uint32);
+static G* runqsteal(P*, P*);
+static void mput(M*);
+static M* mget(void);
+static void mcommoninit(M*);
+static void schedule(void);
+static void procresize(int32);
+static void acquirep(P*);
+static P* releasep(void);
+static void newm(void(*)(void), P*);
+static void stopm(void);
+static void startm(P*, bool);
+static void handoffp(P*);
+static void wakep(void);
+static void stoplockedm(void);
+static void startlockedm(G*);
+static void sysmon(void);
+static uint32 retake(int64);
+static void incidlelocked(int32);
+static void checkdead(void);
+static void exitsyscall0(G*);
+void park_m(G*);
+static void goexit0(G*);
+static void gfput(P*, G*);
+static G* gfget(P*);
+static void gfpurge(P*);
+static void globrunqput(G*);
+static void globrunqputbatch(G*, G*, int32);
+static G* globrunqget(P*, int32);
+static P* pidleget(void);
+static void pidleput(P*);
+static void injectglist(G*);
+static bool preemptall(void);
+static bool preemptone(P*);
+static bool exitsyscallfast(void);
+static bool haveexperiment(int8*);
+void allgadd(G*);
+static void dropg(void);
+
+extern String buildVersion;
+*/
+
+// The bootstrap sequence is:
+//
+// call osinit
+// call schedinit
+// make & queue new G
+// call runtime·mstart
+//
+// The new G calls runtime·main.
+func schedinit() {
+ // raceinit must be the first call to race detector.
+ // In particular, it must be done before mallocinit below calls racemapshadow.
+ _g_ := getg()
+ if raceenabled {
+ _g_.racectx = raceinit()
+ }
+
+ sched.maxmcount = 10000
+
+ tracebackinit()
+ symtabinit()
+ stackinit()
+ mallocinit()
+ mcommoninit(_g_.m)
+
+ goargs()
+ goenvs()
+ parsedebugvars()
+ gcinit()
+
+ sched.lastpoll = uint64(nanotime())
+ procs := 1
+ if n := goatoi(gogetenv("GOMAXPROCS")); n > 0 {
+ if n > _MaxGomaxprocs {
+ n = _MaxGomaxprocs
+ }
+ procs = n
+ }
+ procresize(int32(procs))
+
+ if buildVersion == "" {
+ // Condition should never trigger. This code just serves
+ // to ensure runtime·buildVersion is kept in the resulting binary.
+ buildVersion = "unknown"
+ }
+}
+
+func newsysmon() {
+ _newm(sysmon, nil)
+}
+
+func dumpgstatus(gp *g) {
+ _g_ := getg()
+ print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
+}
+
+func checkmcount() {
+ // sched lock is held
+ if sched.mcount > sched.maxmcount {
+ print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
+ gothrow("thread exhaustion")
+ }
+}
+
+func mcommoninit(mp *m) {
+ _g_ := getg()
+
+ // g0 stack won't make sense for user (and is not necessarily unwindable).
+ if _g_ != _g_.m.g0 {
+ callers(1, &mp.createstack[0], len(mp.createstack))
+ }
+
+ mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
+ if mp.fastrand == 0 {
+ mp.fastrand = 0x49f6428a
+ }
+
+ lock(&sched.lock)
+ mp.id = sched.mcount
+ sched.mcount++
+ checkmcount()
+ mpreinit(mp)
+ if mp.gsignal != nil {
+ mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
+ }
+
+ // Add to allm so garbage collector doesn't free g->m
+ // when it is just in a register or thread-local storage.
+ mp.alllink = allm
+
+ // NumCgoCall() iterates over allm w/o schedlock,
+ // so we need to publish it safely.
+ atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
+ unlock(&sched.lock)
+}
+
+// Mark gp ready to run.
+func ready(gp *g) {
+ status := readgstatus(gp)
+
+ // Mark runnable.
+ _g_ := getg()
+ _g_.m.locks++ // disable preemption because it can be holding p in a local var
+ if status&^_Gscan != _Gwaiting {
+ dumpgstatus(gp)
+ gothrow("bad g->status in ready")
+ }
+
+ // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ runqput(_g_.m.p, gp)
+ if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
+ wakep()
+ }
+ _g_.m.locks--
+ if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ }
+}
+
+func gcprocs() int32 {
+ // Figure out how many CPUs to use during GC.
+ // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
+ lock(&sched.lock)
+ n := gomaxprocs
+ if n > ncpu {
+ n = ncpu
+ }
+ if n > _MaxGcproc {
+ n = _MaxGcproc
+ }
+ if n > sched.nmidle+1 { // one M is currently running
+ n = sched.nmidle + 1
+ }
+ unlock(&sched.lock)
+ return n
+}
+
+func needaddgcproc() bool {
+ lock(&sched.lock)
+ n := gomaxprocs
+ if n > ncpu {
+ n = ncpu
+ }
+ if n > _MaxGcproc {
+ n = _MaxGcproc
+ }
+ n -= sched.nmidle + 1 // one M is currently running
+ unlock(&sched.lock)
+ return n > 0
+}
+
+func helpgc(nproc int32) {
+ _g_ := getg()
+ lock(&sched.lock)
+ pos := 0
+ for n := int32(1); n < nproc; n++ { // one M is currently running
+ if allp[pos].mcache == _g_.m.mcache {
+ pos++
+ }
+ mp := mget()
+ if mp == nil {
+ gothrow("gcprocs inconsistency")
+ }
+ mp.helpgc = n
+ mp.mcache = allp[pos].mcache
+ pos++
+ notewakeup(&mp.park)
+ }
+ unlock(&sched.lock)
+}
+
+// Similar to stoptheworld but best-effort and can be called several times.
+// There is no reverse operation; it is used during crashing.
+// This function must not lock any mutexes.
+func freezetheworld() {
+ if gomaxprocs == 1 {
+ return
+ }
+ // stopwait and preemption requests can be lost
+ // due to races with concurrently executing threads,
+ // so try several times
+ for i := 0; i < 5; i++ {
+ // this should tell the scheduler to not start any new goroutines
+ sched.stopwait = 0x7fffffff
+ atomicstore(&sched.gcwaiting, 1)
+ // this should stop running goroutines
+ if !preemptall() {
+ break // no running goroutines
+ }
+ usleep(1000)
+ }
+ // to be sure
+ usleep(1000)
+ preemptall()
+ usleep(1000)
+}
+
+func isscanstatus(status uint32) bool {
+ if status == _Gscan {
+ gothrow("isscanstatus: Bad status Gscan")
+ }
+ return status&_Gscan == _Gscan
+}
+
+// All reads and writes of g's status go through readgstatus, casgstatus
+// castogscanstatus, casfrom_Gscanstatus.
+//go:nosplit
+func readgstatus(gp *g) uint32 {
+ return atomicload(&gp.atomicstatus)
+}
+
+// The Gscanstatuses are acting like locks and this releases them.
+// If it proves to be a performance hit we should be able to make these
+// simple atomic stores but for now we are going to throw if
+// we see an inconsistent state.
+func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
+ success := false
+
+ // Check that transition is valid.
+ switch oldval {
+ case _Gscanrunnable,
+ _Gscanwaiting,
+ _Gscanrunning,
+ _Gscansyscall:
+ if newval == oldval&^_Gscan {
+ success = cas(&gp.atomicstatus, oldval, newval)
+ }
+ case _Gscanenqueue:
+ if newval == _Gwaiting {
+ success = cas(&gp.atomicstatus, oldval, newval)
+ }
+ }
+ if !success {
+ print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
+ dumpgstatus(gp)
+ gothrow("casfrom_Gscanstatus: gp->status is not in scan state")
+ }
+}
+
+// This will return false if the gp is not in the expected status and the cas fails.
+// This acts like a lock acquire while the casfromgstatus acts like a lock release.
+func castogscanstatus(gp *g, oldval, newval uint32) bool {
+ switch oldval {
+ case _Grunnable,
+ _Gwaiting,
+ _Gsyscall:
+ if newval == oldval|_Gscan {
+ return cas(&gp.atomicstatus, oldval, newval)
+ }
+ case _Grunning:
+ if newval == _Gscanrunning || newval == _Gscanenqueue {
+ return cas(&gp.atomicstatus, oldval, newval)
+ }
+ }
+ print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
+ gothrow("castogscanstatus")
+ panic("not reached")
+}
+
+// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
+// and casfrom_Gscanstatus instead.
+// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
+// put it in the Gscan state is finished.
+//go:nosplit
+func casgstatus(gp *g, oldval, newval uint32) {
+ if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
+ systemstack(func() {
+ print("casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
+ gothrow("casgstatus: bad incoming values")
+ })
+ }
+
+ // loop if gp->atomicstatus is in a scan state giving
+ // GC time to finish and change the state to oldval.
+ for !cas(&gp.atomicstatus, oldval, newval) {
+ }
+}
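
casgstatus spins on a compare-and-swap until the status word leaves any transient _Gscan state, so the transition only commits against the exact expected old value. A tiny self-contained sketch of that loop shape (editor's illustration with invented state names, not the runtime's status set):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	stateWaiting uint32 = iota
	stateRunnable
	stateRunning
)

// casState spins until the word can be moved from oldval to newval, the same
// way casgstatus loops while the GC briefly holds the status in a scan state.
func casState(status *uint32, oldval, newval uint32) {
	for !atomic.CompareAndSwapUint32(status, oldval, newval) {
	}
}

func main() {
	st := stateWaiting
	casState(&st, stateWaiting, stateRunnable)
	fmt.Println(st == stateRunnable) // true
}
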
+
+// stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
+// or in the context of a moving collector the pointers can be flipped from pointing
+// to old object to pointing to new objects.
+// If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
+// the caller calls restartg.
+// If stopg returns false, the caller is not responsible for calling restartg. This can happen
+// if another thread, either the gp itself or another GC thread is taking the responsibility
+// to do the GC work related to this thread.
+func stopg(gp *g) bool {
+ for {
+ if gp.gcworkdone {
+ return false
+ }
+
+ switch s := readgstatus(gp); s {
+ default:
+ dumpgstatus(gp)
+ gothrow("stopg: gp->atomicstatus is not valid")
+
+ case _Gdead:
+ return false
+
+ case _Gcopystack:
+ // Loop until a new stack is in place.
+
+ case _Grunnable,
+ _Gsyscall,
+ _Gwaiting:
+ // Claim goroutine by setting scan bit.
+ if !castogscanstatus(gp, s, s|_Gscan) {
+ break
+ }
+ // In scan state, do work.
+ gcphasework(gp)
+ return true
+
+ case _Gscanrunnable,
+ _Gscanwaiting,
+ _Gscansyscall:
+ // Goroutine already claimed by another GC helper.
+ return false
+
+ case _Grunning:
+ if gcphase == _GCscan {
+ // Running routines are not scanned during the
+ // GCscan phase; we only scan non-running routines.
+ gp.gcworkdone = true
+ return false
+ }
+
+ // Claim goroutine, so we aren't racing with a status
+ // transition away from Grunning.
+ if !castogscanstatus(gp, _Grunning, _Gscanrunning) {
+ break
+ }
+
+ // Mark gp for preemption.
+ if !gp.gcworkdone {
+ gp.preemptscan = true
+ gp.preempt = true
+ gp.stackguard0 = stackPreempt
+ }
+
+ // Unclaim.
+ casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
+ return false
+ }
+ }
+}
+
+// The GC requests that this routine be moved from a scanmumble state to a mumble state.
+func restartg(gp *g) {
+ s := readgstatus(gp)
+ switch s {
+ default:
+ dumpgstatus(gp)
+ gothrow("restartg: unexpected status")
+
+ case _Gdead:
+ // ok
+
+ case _Gscanrunnable,
+ _Gscanwaiting,
+ _Gscansyscall:
+ casfrom_Gscanstatus(gp, s, s&^_Gscan)
+
+ // Scan is now completed.
+ // Goroutine now needs to be made runnable.
+ // We put it on the global run queue; ready blocks on the global scheduler lock.
+ case _Gscanenqueue:
+ casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
+ if gp != getg().m.curg {
+ gothrow("processing Gscanenqueue on wrong m")
+ }
+ dropg()
+ ready(gp)
+ }
+}
+
+func stopscanstart(gp *g) {
+ _g_ := getg()
+ if _g_ == gp {
+ gothrow("GC not moved to G0")
+ }
+ if stopg(gp) {
+ if !isscanstatus(readgstatus(gp)) {
+ dumpgstatus(gp)
+ gothrow("GC not in scan state")
+ }
+ restartg(gp)
+ }
+}
+
+// Runs on g0 and does the actual work after putting the g back on the run queue.
+func mquiesce(gpmaster *g) {
+ // enqueue the calling goroutine.
+ restartg(gpmaster)
+
+ activeglen := len(allgs)
+ for i := 0; i < activeglen; i++ {
+ gp := allgs[i]
+ if readgstatus(gp) == _Gdead {
+ gp.gcworkdone = true // noop scan.
+ } else {
+ gp.gcworkdone = false
+ }
+ stopscanstart(gp)
+ }
+
+ // Check that the G's gcwork (such as scanning) has been done. If not, do it now.
+ // You can end up doing work here if the page trap on a Grunning goroutine has
+ // not been sprung or in some race situations. For example, a runnable goroutine goes dead
+ // and is started up again with gp->gcworkdone set to false.
+ for i := 0; i < activeglen; i++ {
+ gp := allgs[i]
+ for !gp.gcworkdone {
+ status := readgstatus(gp)
+ if status == _Gdead {
+ //do nothing, scan not needed.
+ gp.gcworkdone = true // scan is a noop
+ break
+ }
+ if status == _Grunning && gp.stackguard0 == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
+ noteclear(&sched.stopnote)
+ } else {
+ stopscanstart(gp)
+ }
+ }
+ }
+
+ for i := 0; i < activeglen; i++ {
+ gp := allgs[i]
+ status := readgstatus(gp)
+ if isscanstatus(status) {
+ print("mstopandscang:bottom: post scan bad status gp=", gp, " has status ", hex(status), "\n")
+ dumpgstatus(gp)
+ }
+ if !gp.gcworkdone && status != _Gdead {
+ print("mstopandscang:bottom: post scan gp=", gp, "->gcworkdone still false\n")
+ dumpgstatus(gp)
+ }
+ }
+
+ schedule() // Never returns.
+}
+
+// quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
+// If the global gcphase is GCmark quiesce will ensure that all of the goroutine's stacks
+// have been scanned before it returns.
+func quiesce(mastergp *g) {
+ castogscanstatus(mastergp, _Grunning, _Gscanenqueue)
+ // Now move this to the g0 (aka m) stack.
+ // g0 will potentially scan this thread and put mastergp on the runqueue
+ mcall(mquiesce)
+}
+
+// This is used by the GC as well as the routines that do stack dumps. In the case
+// of GC all the routines can be reliably stopped. This is not always the case
+// when the system is in panic or being exited.
+func stoptheworld() {
+ _g_ := getg()
+
+ // If we hold a lock, then we won't be able to stop another M
+ // that is blocked trying to acquire the lock.
+ if _g_.m.locks > 0 {
+ gothrow("stoptheworld: holding locks")
+ }
+
+ lock(&sched.lock)
+ sched.stopwait = gomaxprocs
+ atomicstore(&sched.gcwaiting, 1)
+ preemptall()
+ // stop current P
+ _g_.m.p.status = _Pgcstop // Pgcstop is only diagnostic.
+ sched.stopwait--
+ // try to retake all P's in Psyscall status
+ for i := 0; i < int(gomaxprocs); i++ {
+ p := allp[i]
+ s := p.status
+ if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
+ sched.stopwait--
+ }
+ }
+ // stop idle P's
+ for {
+ p := pidleget()
+ if p == nil {
+ break
+ }
+ p.status = _Pgcstop
+ sched.stopwait--
+ }
+ wait := sched.stopwait > 0
+ unlock(&sched.lock)
+
+ // wait for remaining P's to stop voluntarily
+ if wait {
+ for {
+ // wait for 100us, then try to re-preempt in case of any races
+ if notetsleep(&sched.stopnote, 100*1000) {
+ noteclear(&sched.stopnote)
+ break
+ }
+ preemptall()
+ }
+ }
+ if sched.stopwait != 0 {
+ gothrow("stoptheworld: not stopped")
+ }
+ for i := 0; i < int(gomaxprocs); i++ {
+ p := allp[i]
+ if p.status != _Pgcstop {
+ gothrow("stoptheworld: not stopped")
+ }
+ }
+}
+
+func mhelpgc() {
+ _g_ := getg()
+ _g_.m.helpgc = -1
+}
+
+func starttheworld() {
+ _g_ := getg()
+
+ _g_.m.locks++ // disable preemption because it can be holding p in a local var
+ gp := netpoll(false) // non-blocking
+ injectglist(gp)
+ add := needaddgcproc()
+ lock(&sched.lock)
+ if newprocs != 0 {
+ procresize(newprocs)
+ newprocs = 0
+ } else {
+ procresize(gomaxprocs)
+ }
+ sched.gcwaiting = 0
+
+ var p1 *p
+ for {
+ p := pidleget()
+ if p == nil {
+ break
+ }
+ // procresize() puts p's with work at the beginning of the list.
+ // Once we reach a p without a run queue, the rest don't have one either.
+ if p.runqhead == p.runqtail {
+ pidleput(p)
+ break
+ }
+ p.m = mget()
+ p.link = p1
+ p1 = p
+ }
+ if sched.sysmonwait != 0 {
+ sched.sysmonwait = 0
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+
+ for p1 != nil {
+ p := p1
+ p1 = p1.link
+ if p.m != nil {
+ mp := p.m
+ p.m = nil
+ if mp.nextp != nil {
+ gothrow("starttheworld: inconsistent mp->nextp")
+ }
+ mp.nextp = p
+ notewakeup(&mp.park)
+ } else {
+ // Start M to run P. Do not start another M below.
+ _newm(nil, p)
+ add = false
+ }
+ }
+
+ if add {
+ // If GC could have used another helper proc, start one now,
+ // in the hope that it will be available next time.
+ // It would have been even better to start it before the collection,
+ // but doing so requires allocating memory, so it's tricky to
+ // coordinate. This lazy approach works out in practice:
+ // we don't mind if the first couple gc rounds don't have quite
+ // the maximum number of procs.
+ _newm(mhelpgc, nil)
+ }
+ _g_.m.locks--
+ if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ }
+}
+
+// Called to start an M.
+//go:nosplit
+func mstart() {
+ _g_ := getg()
+
+ if _g_.stack.lo == 0 {
+ // Initialize stack bounds from system stack.
+ // Cgo may have left stack size in stack.hi.
+ size := _g_.stack.hi
+ if size == 0 {
+ size = 8192
+ }
+ _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
+ _g_.stack.lo = _g_.stack.hi - size + 1024
+ }
+ // Initialize stack guards so that we can start calling
+ // both Go and C functions with stack growth prologues.
+ _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ _g_.stackguard1 = _g_.stackguard0
+ mstart1()
+}
+
+func mstart1() {
+ _g_ := getg()
+
+ if _g_ != _g_.m.g0 {
+ gothrow("bad runtime·mstart")
+ }
+
+ // Record top of stack for use by mcall.
+ // Once we call schedule we're never coming back,
+ // so other calls can reuse this stack space.
+ gosave(&_g_.m.g0.sched)
+ _g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
+ asminit()
+ minit()
+
+ // Install signal handlers; after minit so that minit can
+ // prepare the thread to be able to handle the signals.
+ if _g_.m == &m0 {
+ initsig()
+ }
+
+ if _g_.m.mstartfn != nil {
+ fn := *(*func())(unsafe.Pointer(&_g_.m.mstartfn))
+ fn()
+ }
+
+ if _g_.m.helpgc != 0 {
+ _g_.m.helpgc = 0
+ stopm()
+ } else if _g_.m != &m0 {
+ acquirep(_g_.m.nextp)
+ _g_.m.nextp = nil
+ }
+ schedule()
+
+ // TODO(brainman): This point is never reached, because scheduler
+ // does not release os threads at the moment. But once this path
+ // is enabled, we must remove our seh here.
+}
+
+// When running with cgo, we call _cgo_thread_start
+// to start threads for us so that we can play nicely with
+// foreign code.
+var cgoThreadStart unsafe.Pointer
+
+type cgothreadstart struct {
+ g *g
+ tls *uint64
+ fn unsafe.Pointer
+}
+
+// Allocate a new m unassociated with any thread.
+// Can use p for allocation context if needed.
+func allocm(_p_ *p) *m {
+ _g_ := getg()
+ _g_.m.locks++ // disable GC because it can be called from sysmon
+ if _g_.m.p == nil {
+ acquirep(_p_) // temporarily borrow p for mallocs in this function
+ }
+ mp := newM()
+ mcommoninit(mp)
+
+ // In case of cgo or Solaris, pthread_create will make us a stack.
+ // Windows and Plan 9 will lay out the sched stack on the OS stack.
+ if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
+ mp.g0 = malg(-1)
+ } else {
+ mp.g0 = malg(8192)
+ }
+ mp.g0.m = mp
+
+ if _p_ == _g_.m.p {
+ releasep()
+ }
+ _g_.m.locks--
+ if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ }
+
+ return mp
+}
+
+func allocg() *g {
+ return newG()
+}
+
+// needm is called when a cgo callback happens on a
+// thread without an m (a thread not created by Go).
+// In this case, needm is expected to find an m to use
+// and return with m, g initialized correctly.
+// Since m and g are not set now (likely nil, but see below)
+// needm is limited in what routines it can call. In particular
+// it can only call nosplit functions (textflag 7) and cannot
+// do any scheduling that requires an m.
+//
+// In order to avoid needing heavy lifting here, we adopt
+// the following strategy: there is a stack of available m's
+// that can be stolen. Using compare-and-swap
+// to pop from the stack has ABA races, so we simulate
+// a lock by doing an exchange (via casp) to steal the stack
+// head and replace the top pointer with MLOCKED (1).
+// This serves as a simple spin lock that we can use even
+// without an m. The thread that locks the stack in this way
+// unlocks the stack by storing a valid stack head pointer.
+//
+// In order to make sure that there is always an m structure
+// available to be stolen, we maintain the invariant that there
+// is always one more than needed. At the beginning of the
+// program (if cgo is in use) the list is seeded with a single m.
+// If needm finds that it has taken the last m off the list, its job
+// is - once it has installed its own m so that it can do things like
+// allocate memory - to create a spare m and put it on the list.
+//
+// Each of these extra m's also has a g0 and a curg that are
+// pressed into service as the scheduling stack and current
+// goroutine for the duration of the cgo callback.
+//
+// When the callback is done with the m, it calls dropm to
+// put the m back on the list.
+//go:nosplit
+func needm(x byte) {
+ if needextram != 0 {
+ // Can happen if C/C++ code calls Go from a global ctor.
+ // Can not throw, because scheduler is not initialized yet.
+ // XXX
+ // write(2, unsafe.Pointer("fatal error: cgo callback before cgo call\n"), sizeof("fatal error: cgo callback before cgo call\n") - 1)
+ exit(1)
+ }
+
+ // Lock extra list, take head, unlock popped list.
+ // nilokay=false is safe here because of the invariant above,
+ // that the extra list always contains or will soon contain
+ // at least one m.
+ mp := lockextra(false)
+
+ // Set needextram when we've just emptied the list,
+ // so that the eventual call into cgocallbackg will
+ // allocate a new m for the extra list. We delay the
+ // allocation until then so that it can be done
+ // after exitsyscall makes sure it is okay to be
+ // running at all (that is, there's no garbage collection
+ // running right now).
+ mp.needextram = mp.schedlink == nil
+ unlockextra(mp.schedlink)
+
+ // Install g (= m->g0) and set the stack bounds
+ // to match the current stack. We don't actually know
+ // how big the stack is, like we don't know how big any
+ // scheduling stack is, but we assume there's at least 32 kB,
+ // which is more than enough for us.
+ setg(mp.g0)
+ _g_ := getg()
+ _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
+ _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
+ _g_.stackguard0 = _g_.stack.lo + _StackGuard
+
+ // Initialize this thread to use the m.
+ asminit()
+ minit()
+}
+
+// newextram allocates an m and puts it on the extra list.
+// It is called with a working local m, so that it can do things
+// like call schedlock and allocate.
+func newextram() {
+ // Create extra goroutine locked to extra m.
+ // The goroutine is the context in which the cgo callback will run.
+ // The sched.pc will never be returned to, but setting it to
+ // goexit makes clear to the traceback routines where
+ // the goroutine stack ends.
+ mp := allocm(nil)
+ gp := malg(4096)
+ gp.sched.pc = funcPC(goexit) + _PCQuantum
+ gp.sched.sp = gp.stack.hi
+ gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
+ gp.sched.lr = 0
+ gp.sched.g = gp
+ gp.syscallpc = gp.sched.pc
+ gp.syscallsp = gp.sched.sp
+ // malg returns status as Gidle, change to Gsyscall before adding to allg
+ // where GC will see it.
+ casgstatus(gp, _Gidle, _Gsyscall)
+ gp.m = mp
+ mp.curg = gp
+ mp.locked = _LockInternal
+ mp.lockedg = gp
+ gp.lockedm = mp
+ gp.goid = int64(xadd64(&sched.goidgen, 1))
+ if raceenabled {
+ gp.racectx = racegostart(funcPC(newextram))
+ }
+ // put on allg for garbage collector
+ allgadd(gp)
+
+ // Add m to the extra list.
+ mnext := lockextra(true)
+ mp.schedlink = mnext
+ unlockextra(mp)
+}
+
+// dropm is called when a cgo callback has called needm but is now
+// done with the callback and returning back into the non-Go thread.
+// It puts the current m back onto the extra list.
+//
+// The main expense here is the call to signalstack to release the
+// m's signal stack, and then the call to needm on the next callback
+// from this thread. It is tempting to try to save the m for next time,
+// which would eliminate both these costs, but there might not be
+// a next time: the current thread (which Go does not control) might exit.
+// If we saved the m for that thread, there would be an m leak each time
+// such a thread exited. Instead, we acquire and release an m on each
+// call. These should typically not be scheduling operations, just a few
+// atomics, so the cost should be small.
+//
+// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
+// variable using pthread_key_create. Unlike the pthread keys we already use
+// on OS X, this dummy key would never be read by Go code. It would exist
+// only so that we could register at thread-exit-time destructor.
+// That destructor would put the m back onto the extra list.
+// This is purely a performance optimization. The current version,
+// in which dropm happens on each cgo call, is still correct too.
+// We may have to keep the current version on systems with cgo
+// but without pthreads, like Windows.
+func dropm() {
+ // Undo whatever initialization minit did during needm.
+ unminit()
+
+ // Clear m and g, and return m to the extra list.
+ // After the call to setmg we can only call nosplit functions.
+ mp := getg().m
+ setg(nil)
+
+ mnext := lockextra(true)
+ mp.schedlink = mnext
+ unlockextra(mp)
+}
+
+var extram uintptr
+
+// lockextra locks the extra list and returns the list head.
+// The caller must unlock the list by storing a new list head
+// to extram. If nilokay is true, then lockextra will
+// return a nil list head if that's what it finds. If nilokay is false,
+// lockextra will keep waiting until the list head is no longer nil.
+//go:nosplit
+func lockextra(nilokay bool) *m {
+ const locked = 1
+
+ for {
+ old := atomicloaduintptr(&extram)
+ if old == locked {
+ yield := osyield
+ yield()
+ continue
+ }
+ if old == 0 && !nilokay {
+ usleep(1)
+ continue
+ }
+ if casuintptr(&extram, old, locked) {
+ return (*m)(unsafe.Pointer(old))
+ }
+ yield := osyield
+ yield()
+ continue
+ }
+}
+
+//go:nosplit
+func unlockextra(mp *m) {
+ atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
+}
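
lockextra/unlockextra above treat the extra-m list head itself as a spin lock: swapping in the sentinel value 1 (MLOCKED) claims the list, and storing a real head releases it, which works even on a thread that has no m or g yet. A hedged sketch of the same pattern using an ordinary pointer and a sentinel node (editor's illustration; the runtime uses a raw uintptr, usleep, and osyield instead):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

type node struct {
	next *node
	val  int
}

var (
	lockedSentinel = new(node) // stands in for the runtime's MLOCKED (1)
	head           atomic.Pointer[node]
)

// lockList steals the current list head and leaves the sentinel in its place,
// acting as a spin lock around the whole list.
func lockList() *node {
	for {
		old := head.Load()
		if old == lockedSentinel {
			runtime.Gosched() // held by someone else; retry
			continue
		}
		if head.CompareAndSwap(old, lockedSentinel) {
			return old
		}
	}
}

// unlockList releases the "lock" by publishing a valid head again.
func unlockList(n *node) {
	head.Store(n)
}

func main() {
	unlockList(&node{val: 42}) // seed with one spare element
	n := lockList()            // take it (and hold the lock)
	unlockList(n.next)         // put back the remainder, releasing the lock
	fmt.Println(n.val)
}
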
+
+// Create a new m. It will start off with a call to fn, or else the scheduler.
+func _newm(fn func(), _p_ *p) {
+ mp := allocm(_p_)
+ mp.nextp = _p_
+ mp.mstartfn = *(*unsafe.Pointer)(unsafe.Pointer(&fn))
+
+ if iscgo {
+ var ts cgothreadstart
+ if _cgo_thread_start == nil {
+ gothrow("_cgo_thread_start missing")
+ }
+ ts.g = mp.g0
+ ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
+ ts.fn = unsafe.Pointer(funcPC(mstart))
+ asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
+ return
+ }
+ newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
+}
+
+// Stops execution of the current m until new work is available.
+// Returns with acquired P.
+func stopm() {
+ _g_ := getg()
+
+ if _g_.m.locks != 0 {
+ gothrow("stopm holding locks")
+ }
+ if _g_.m.p != nil {
+ gothrow("stopm holding p")
+ }
+ if _g_.m.spinning {
+ _g_.m.spinning = false
+ xadd(&sched.nmspinning, -1)
+ }
+
+retry:
+ lock(&sched.lock)
+ mput(_g_.m)
+ unlock(&sched.lock)
+ notesleep(&_g_.m.park)
+ noteclear(&_g_.m.park)
+ if _g_.m.helpgc != 0 {
+ gchelper()
+ _g_.m.helpgc = 0
+ _g_.m.mcache = nil
+ goto retry
+ }
+ acquirep(_g_.m.nextp)
+ _g_.m.nextp = nil
+}
+
+func mspinning() {
+ getg().m.spinning = true
+}
+
+// Schedules some M to run the p (creates an M if necessary).
+// If p==nil, tries to get an idle P, if no idle P's does nothing.
+func startm(_p_ *p, spinning bool) {
+ lock(&sched.lock)
+ if _p_ == nil {
+ _p_ = pidleget()
+ if _p_ == nil {
+ unlock(&sched.lock)
+ if spinning {
+ xadd(&sched.nmspinning, -1)
+ }
+ return
+ }
+ }
+ mp := mget()
+ unlock(&sched.lock)
+ if mp == nil {
+ var fn func()
+ if spinning {
+ fn = mspinning
+ }
+ _newm(fn, _p_)
+ return
+ }
+ if mp.spinning {
+ gothrow("startm: m is spinning")
+ }
+ if mp.nextp != nil {
+ gothrow("startm: m has p")
+ }
+ mp.spinning = spinning
+ mp.nextp = _p_
+ notewakeup(&mp.park)
+}
+
+// Hands off P from syscall or locked M.
+func handoffp(_p_ *p) {
+ // if it has local work, start it straight away
+ if _p_.runqhead != _p_.runqtail || sched.runqsize != 0 {
+ startm(_p_, false)
+ return
+ }
+ // no local work, check that there are no spinning/idle M's,
+ // otherwise our help is not required
+ if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
+ startm(_p_, true)
+ return
+ }
+ lock(&sched.lock)
+ if sched.gcwaiting != 0 {
+ _p_.status = _Pgcstop
+ sched.stopwait--
+ if sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ unlock(&sched.lock)
+ return
+ }
+ if sched.runqsize != 0 {
+ unlock(&sched.lock)
+ startm(_p_, false)
+ return
+ }
+ // If this is the last running P and nobody is polling network,
+ // need to wakeup another M to poll network.
+ if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
+ unlock(&sched.lock)
+ startm(_p_, false)
+ return
+ }
+ pidleput(_p_)
+ unlock(&sched.lock)
+}
+
+// Tries to add one more P to execute G's.
+// Called when a G is made runnable (newproc, ready).
+func wakep() {
+ // be conservative about spinning threads
+ if !cas(&sched.nmspinning, 0, 1) {
+ return
+ }
+ startm(nil, true)
+}
+
+// Stops execution of the current m that is locked to a g until the g is runnable again.
+// Returns with acquired P.
+func stoplockedm() {
+ _g_ := getg()
+
+ if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
+ gothrow("stoplockedm: inconsistent locking")
+ }
+ if _g_.m.p != nil {
+ // Schedule another M to run this p.
+ _p_ := releasep()
+ handoffp(_p_)
+ }
+ incidlelocked(1)
+ // Wait until another thread schedules lockedg again.
+ notesleep(&_g_.m.park)
+ noteclear(&_g_.m.park)
+ status := readgstatus(_g_.m.lockedg)
+ if status&^_Gscan != _Grunnable {
+ print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
+ dumpgstatus(_g_)
+ gothrow("stoplockedm: not runnable")
+ }
+ acquirep(_g_.m.nextp)
+ _g_.m.nextp = nil
+}
+
+// Schedules the locked m to run the locked gp.
+func startlockedm(gp *g) {
+ _g_ := getg()
+
+ mp := gp.lockedm
+ if mp == _g_.m {
+ gothrow("startlockedm: locked to me")
+ }
+ if mp.nextp != nil {
+ gothrow("startlockedm: m has p")
+ }
+ // directly handoff current P to the locked m
+ incidlelocked(-1)
+ _p_ := releasep()
+ mp.nextp = _p_
+ notewakeup(&mp.park)
+ stopm()
+}
+
+// Stops the current m for stoptheworld.
+// Returns when the world is restarted.
+func gcstopm() {
+ _g_ := getg()
+
+ if sched.gcwaiting == 0 {
+ gothrow("gcstopm: not waiting for gc")
+ }
+ if _g_.m.spinning {
+ _g_.m.spinning = false
+ xadd(&sched.nmspinning, -1)
+ }
+ _p_ := releasep()
+ lock(&sched.lock)
+ _p_.status = _Pgcstop
+ sched.stopwait--
+ if sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ unlock(&sched.lock)
+ stopm()
+}
+
+// Schedules gp to run on the current M.
+// Never returns.
+func execute(gp *g) {
+ _g_ := getg()
+
+ casgstatus(gp, _Grunnable, _Grunning)
+ gp.waitsince = 0
+ gp.preempt = false
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ _g_.m.p.schedtick++
+ _g_.m.curg = gp
+ gp.m = _g_.m
+
+ // Check whether the profiler needs to be turned on or off.
+ hz := sched.profilehz
+ if _g_.m.profilehz != hz {
+ resetcpuprofiler(hz)
+ }
+
+ gogo(&gp.sched)
+}
+
+// Finds a runnable goroutine to execute.
+// Tries to steal from other P's, get g from global queue, poll network.
+func findrunnable() *g {
+ _g_ := getg()
+
+top:
+ if sched.gcwaiting != 0 {
+ gcstopm()
+ goto top
+ }
+ if fingwait && fingwake {
+ if gp := wakefing(); gp != nil {
+ ready(gp)
+ }
+ }
+
+ // local runq
+ if gp := runqget(_g_.m.p); gp != nil {
+ return gp
+ }
+
+ // global runq
+ if sched.runqsize != 0 {
+ lock(&sched.lock)
+ gp := globrunqget(_g_.m.p, 0)
+ unlock(&sched.lock)
+ if gp != nil {
+ return gp
+ }
+ }
+
+ // poll network - returns list of goroutines
+ if gp := netpoll(false); gp != nil { // non-blocking
+ injectglist(gp.schedlink)
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ return gp
+ }
+
+ // If number of spinning M's >= number of busy P's, block.
+ // This is necessary to prevent excessive CPU consumption
+ // when GOMAXPROCS>>1 but the program parallelism is low.
+ if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
+ goto stop
+ }
+ if !_g_.m.spinning {
+ _g_.m.spinning = true
+ xadd(&sched.nmspinning, 1)
+ }
+ // random steal from other P's
+ for i := 0; i < int(2*gomaxprocs); i++ {
+ if sched.gcwaiting != 0 {
+ goto top
+ }
+ _p_ := allp[fastrand1()%uint32(gomaxprocs)]
+ var gp *g
+ if _p_ == _g_.m.p {
+ gp = runqget(_p_)
+ } else {
+ gp = runqsteal(_g_.m.p, _p_)
+ }
+ if gp != nil {
+ return gp
+ }
+ }
+stop:
+
+ // return P and block
+ lock(&sched.lock)
+ if sched.gcwaiting != 0 {
+ unlock(&sched.lock)
+ goto top
+ }
+ if sched.runqsize != 0 {
+ gp := globrunqget(_g_.m.p, 0)
+ unlock(&sched.lock)
+ return gp
+ }
+ _p_ := releasep()
+ pidleput(_p_)
+ unlock(&sched.lock)
+ if _g_.m.spinning {
+ _g_.m.spinning = false
+ xadd(&sched.nmspinning, -1)
+ }
+
+ // check all runqueues once again
+ for i := 0; i < int(gomaxprocs); i++ {
+ _p_ := allp[i]
+ if _p_ != nil && _p_.runqhead != _p_.runqtail {
+ lock(&sched.lock)
+ _p_ = pidleget()
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ goto top
+ }
+ break
+ }
+ }
+
+ // poll network
+ if xchg64(&sched.lastpoll, 0) != 0 {
+ if _g_.m.p != nil {
+ gothrow("findrunnable: netpoll with p")
+ }
+ if _g_.m.spinning {
+ gothrow("findrunnable: netpoll with spinning")
+ }
+ gp := netpoll(true) // block until new work is available
+ atomicstore64(&sched.lastpoll, uint64(nanotime()))
+ if gp != nil {
+ lock(&sched.lock)
+ _p_ = pidleget()
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ injectglist(gp.schedlink)
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ return gp
+ }
+ injectglist(gp)
+ }
+ }
+ stopm()
+ goto top
+}
+
+func resetspinning() {
+ _g_ := getg()
+
+ var nmspinning uint32
+ if _g_.m.spinning {
+ _g_.m.spinning = false
+ nmspinning = xadd(&sched.nmspinning, -1)
+ if nmspinning < 0 {
+ gothrow("findrunnable: negative nmspinning")
+ }
+ } else {
+ nmspinning = atomicload(&sched.nmspinning)
+ }
+
+ // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
+ // so see if we need to wakeup another P here.
+ if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
+ wakep()
+ }
+}
+
+// Injects the list of runnable G's into the scheduler.
+// Can run concurrently with GC.
+func injectglist(glist *g) {
+ if glist == nil {
+ return
+ }
+ lock(&sched.lock)
+ var n int
+ for n = 0; glist != nil; n++ {
+ gp := glist
+ glist = gp.schedlink
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ globrunqput(gp)
+ }
+ unlock(&sched.lock)
+ for ; n != 0 && sched.npidle != 0; n-- {
+ startm(nil, false)
+ }
+}
+
+// One round of scheduler: find a runnable goroutine and execute it.
+// Never returns.
+func schedule() {
+ _g_ := getg()
+
+ if _g_.m.locks != 0 {
+ gothrow("schedule: holding locks")
+ }
+
+ if _g_.m.lockedg != nil {
+ stoplockedm()
+ execute(_g_.m.lockedg) // Never returns.
+ }
+
+top:
+ if sched.gcwaiting != 0 {
+ gcstopm()
+ goto top
+ }
+
+ var gp *g
+ // Check the global runnable queue once in a while to ensure fairness.
+ // Otherwise two goroutines can completely occupy the local runqueue
+ // by constantly respawning each other.
+ tick := _g_.m.p.schedtick
+ // This is a fancy way to say tick%61==0;
+ // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
+ if uint64(tick)-((uint64(tick)*0x4325c53f)>>36)*61 == 0 && sched.runqsize > 0 {
+ lock(&sched.lock)
+ gp = globrunqget(_g_.m.p, 1)
+ unlock(&sched.lock)
+ if gp != nil {
+ resetspinning()
+ }
+ }
+ if gp == nil {
+ gp = runqget(_g_.m.p)
+ if gp != nil && _g_.m.spinning {
+ gothrow("schedule: spinning with local work")
+ }
+ }
+ if gp == nil {
+ gp = findrunnable() // blocks until work is available
+ resetspinning()
+ }
+
+ if gp.lockedm != nil {
+ // Hands off own p to the locked m,
+ // then blocks waiting for a new p.
+ startlockedm(gp)
+ goto top
+ }
+
+ execute(gp)
+}
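
The schedtick fairness check above relies on the multiply/shift expression being equal to tick%61 for every 32-bit tick value. A quick stand-alone spot check of that identity (editor's addition, for illustration; it is not part of the change):

package main

import "fmt"

// fancyMod61 is the multiply/shift form used in schedule above.
func fancyMod61(tick uint32) uint64 {
	return uint64(tick) - ((uint64(tick)*0x4325c53f)>>36)*61
}

func main() {
	for _, tick := range []uint32{0, 1, 60, 61, 62, 12345, 1 << 31, ^uint32(0)} {
		got, want := fancyMod61(tick), uint64(tick%61)
		fmt.Printf("tick=%d got=%d want=%d ok=%v\n", tick, got, want, got == want)
	}
}
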
+
+// dropg removes the association between m and the current goroutine m->curg (gp for short).
+// Typically a caller sets gp's status away from Grunning and then
+// immediately calls dropg to finish the job. The caller is also responsible
+// for arranging that gp will be restarted using ready at an
+// appropriate time. After calling dropg and arranging for gp to be
+// readied later, the caller can do other work but eventually should
+// call schedule to restart the scheduling of goroutines on this m.
+func dropg() {
+ _g_ := getg()
+
+ if _g_.m.lockedg == nil {
+ _g_.m.curg.m = nil
+ _g_.m.curg = nil
+ }
+}
+
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
+func park(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
+ _g_ := getg()
+
+ _g_.m.waitlock = lock
+ _g_.m.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
+ _g_.waitreason = reason
+ mcall(park_m)
+}
+
+func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
+ unlock((*mutex)(lock))
+ return true
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling ready(gp).
+func parkunlock(lock *mutex, reason string) {
+ park(parkunlock_c, unsafe.Pointer(lock), reason)
+}
+
+// park continuation on g0.
+func park_m(gp *g) {
+ _g_ := getg()
+
+ casgstatus(gp, _Grunning, _Gwaiting)
+ dropg()
+
+ if _g_.m.waitunlockf != nil {
+ fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
+ ok := fn(gp, _g_.m.waitlock)
+ _g_.m.waitunlockf = nil
+ _g_.m.waitlock = nil
+ if !ok {
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ execute(gp) // Schedule it back, never returns.
+ }
+ }
+ schedule()
+}
+
+// Gosched continuation on g0.
+func gosched_m(gp *g) {
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning {
+ dumpgstatus(gp)
+ gothrow("bad g status")
+ }
+ casgstatus(gp, _Grunning, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ globrunqput(gp)
+ unlock(&sched.lock)
+
+ schedule()
+}
+
+// Finishes execution of the current goroutine.
+// Must be NOSPLIT because it is called from Go. (TODO - probably not anymore)
+//go:nosplit
+func goexit1() {
+ if raceenabled {
+ racegoend()
+ }
+ mcall(goexit0)
+}
+
+// goexit continuation on g0.
+func goexit0(gp *g) {
+ _g_ := getg()
+
+ casgstatus(gp, _Grunning, _Gdead)
+ gp.m = nil
+ gp.lockedm = nil
+ _g_.m.lockedg = nil
+ gp.paniconfault = false
+ gp._defer = nil // should be true already but just in case.
+ gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
+ gp.writebuf = nil
+ gp.waitreason = ""
+ gp.param = nil
+
+ dropg()
+
+ if _g_.m.locked&^_LockExternal != 0 {
+ print("invalid m->locked = ", _g_.m.locked, "\n")
+ gothrow("internal lockOSThread error")
+ }
+ _g_.m.locked = 0
+ gfput(_g_.m.p, gp)
+ schedule()
+}
+
+//go:nosplit
+func save(pc, sp uintptr) {
+ _g_ := getg()
+
+ _g_.sched.pc = pc
+ _g_.sched.sp = sp
+ _g_.sched.lr = 0
+ _g_.sched.ret = 0
+ _g_.sched.ctxt = nil
+ // write as uintptr to avoid write barrier, which will smash _g_.sched.
+ *(*uintptr)(unsafe.Pointer(&_g_.sched.g)) = uintptr(unsafe.Pointer(_g_))
+}
+
+// The goroutine g is about to enter a system call.
+// Record that it's not using the cpu anymore.
+// This is called only from the go syscall library and cgocall,
+// not from the low-level system calls used by the runtime.
+//
+// Entersyscall cannot split the stack: the gosave must
+// make g->sched refer to the caller's stack segment, because
+// entersyscall is going to return immediately after.
+//
+// Nothing entersyscall calls can split the stack either.
+// We cannot safely move the stack during an active call to syscall,
+// because we do not know which of the uintptr arguments are
+// really pointers (back into the stack).
+// In practice, this means that we make the fast path run through
+// entersyscall doing no-split things, and the slow path has to use systemstack
+// to run bigger things on the system stack.
+//
+// reentersyscall is the entry point used by cgo callbacks, where explicitly
+// saved SP and PC are restored. This is needed when exitsyscall will be called
+// from a function further up in the call stack than the parent, as g->syscallsp
+// must always point to a valid stack frame. entersyscall below is the normal
+// entry point for syscalls, which obtains the SP and PC from the caller.
+//go:nosplit
+func reentersyscall(pc, sp uintptr) {
+ _g_ := getg()
+
+ // Disable preemption because during this function g is in Gsyscall status,
+ // but can have inconsistent g->sched, do not let GC observe it.
+ _g_.m.locks++
+
+ // Entersyscall must not call any function that might split/grow the stack.
+ // (See details in comment above.)
+ // Catch calls that might, by replacing the stack guard with something that
+ // will trip any stack check and leaving a flag to tell newstack to die.
+ _g_.stackguard0 = stackPreempt
+ _g_.throwsplit = true
+
+ // Leave SP around for GC and traceback.
+ save(pc, sp)
+ _g_.syscallsp = sp
+ _g_.syscallpc = pc
+ casgstatus(_g_, _Grunning, _Gsyscall)
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ systemstack(func() {
+ print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ gothrow("entersyscall")
+ })
+ }
+
+ if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
+ systemstack(entersyscall_sysmon)
+ save(pc, sp)
+ }
+
+ _g_.m.mcache = nil
+ _g_.m.p.m = nil
+ atomicstore(&_g_.m.p.status, _Psyscall)
+ if sched.gcwaiting != 0 {
+ systemstack(entersyscall_gcwait)
+ save(pc, sp)
+ }
+
+ // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
+ // We set stackguard0 to stackPreempt so that the first stack split check calls morestack.
+ // Morestack detects this case and throws.
+ _g_.stackguard0 = stackPreempt
+ _g_.m.locks--
+}
+
+// Standard syscall entry used by the go syscall library and normal cgo calls.
+//go:nosplit
+func entersyscall(dummy int32) {
+ reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
+}
+
+func entersyscall_sysmon() {
+ lock(&sched.lock)
+ if atomicload(&sched.sysmonwait) != 0 {
+ atomicstore(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+}
+
+func entersyscall_gcwait() {
+ _g_ := getg()
+
+ lock(&sched.lock)
+ if sched.stopwait > 0 && cas(&_g_.m.p.status, _Psyscall, _Pgcstop) {
+ if sched.stopwait--; sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ }
+ unlock(&sched.lock)
+}
+
+// The same as entersyscall(), but with a hint that the syscall is blocking.
+//go:nosplit
+func entersyscallblock(dummy int32) {
+ _g_ := getg()
+
+ _g_.m.locks++ // see comment in entersyscall
+ _g_.throwsplit = true
+ _g_.stackguard0 = stackPreempt // see comment in entersyscall
+
+ // Leave SP around for GC and traceback.
+ pc := getcallerpc(unsafe.Pointer(&dummy))
+ sp := getcallersp(unsafe.Pointer(&dummy))
+ save(pc, sp)
+ _g_.syscallsp = _g_.sched.sp
+ _g_.syscallpc = _g_.sched.pc
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ sp1 := sp
+ sp2 := _g_.sched.sp
+ sp3 := _g_.syscallsp
+ systemstack(func() {
+ print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ gothrow("entersyscallblock")
+ })
+ }
+ casgstatus(_g_, _Grunning, _Gsyscall)
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ systemstack(func() {
+ print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ gothrow("entersyscallblock")
+ })
+ }
+
+ systemstack(entersyscallblock_handoff)
+
+ // Resave for traceback during blocked call.
+ save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
+
+ _g_.m.locks--
+}
+
+func entersyscallblock_handoff() {
+ handoffp(releasep())
+}
+
+// The goroutine g exited its system call.
+// Arrange for it to run on a cpu again.
+// This is called only from the go syscall library, not
+// from the low-level system calls used by the runtime.
+//go:nosplit
+func exitsyscall(dummy int32) {
+ _g_ := getg()
+
+ _g_.m.locks++ // see comment in entersyscall
+ if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
+ gothrow("exitsyscall: syscall frame is no longer valid")
+ }
+
+ _g_.waitsince = 0
+ if exitsyscallfast() {
+ if _g_.m.mcache == nil {
+ gothrow("lost mcache")
+ }
+ // There's a cpu for us, so we can run.
+ _g_.m.p.syscalltick++
+ // We need to cas the status and scan before resuming...
+ casgstatus(_g_, _Gsyscall, _Grunning)
+
+ // Garbage collector isn't running (since we are),
+ // so okay to clear syscallsp.
+ _g_.syscallsp = 0
+ _g_.m.locks--
+ if _g_.preempt {
+ // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ } else {
+ // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
+ _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ }
+ _g_.throwsplit = false
+ return
+ }
+
+ _g_.m.locks--
+
+ // Call the scheduler.
+ mcall(exitsyscall0)
+
+ if _g_.m.mcache == nil {
+ gothrow("lost mcache")
+ }
+
+ // Scheduler returned, so we're allowed to run now.
+ // Delete the syscallsp information that we left for
+ // the garbage collector during the system call.
+ // Must wait until now because until gosched returns
+ // we don't know for sure that the garbage collector
+ // is not running.
+ _g_.syscallsp = 0
+ _g_.m.p.syscalltick++
+ _g_.throwsplit = false
+}
+
+//go:nosplit
+func exitsyscallfast() bool {
+ _g_ := getg()
+
+ // Freezetheworld sets stopwait but does not retake P's.
+ if sched.stopwait != 0 {
+ _g_.m.mcache = nil
+ _g_.m.p = nil
+ return false
+ }
+
+ // Try to re-acquire the last P.
+ if _g_.m.p != nil && _g_.m.p.status == _Psyscall && cas(&_g_.m.p.status, _Psyscall, _Prunning) {
+ // There's a cpu for us, so we can run.
+ _g_.m.mcache = _g_.m.p.mcache
+ _g_.m.p.m = _g_.m
+ return true
+ }
+
+ // Try to get any other idle P.
+ _g_.m.mcache = nil
+ _g_.m.p = nil
+ if sched.pidle != nil {
+ var ok bool
+ systemstack(func() {
+ ok = exitsyscallfast_pidle()
+ })
+ if ok {
+ return true
+ }
+ }
+ return false
+}
+
+func exitsyscallfast_pidle() bool {
+ lock(&sched.lock)
+ _p_ := pidleget()
+ if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
+ atomicstore(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ return true
+ }
+ return false
+}
+
+// exitsyscall slow path on g0.
+// Failed to acquire P, enqueue gp as runnable.
+func exitsyscall0(gp *g) {
+ _g_ := getg()
+
+ casgstatus(gp, _Gsyscall, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ _p_ := pidleget()
+ if _p_ == nil {
+ globrunqput(gp)
+ } else if atomicload(&sched.sysmonwait) != 0 {
+ atomicstore(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ execute(gp) // Never returns.
+ }
+ if _g_.m.lockedg != nil {
+ // Wait until another thread schedules gp and so m again.
+ stoplockedm()
+ execute(gp) // Never returns.
+ }
+ stopm()
+ schedule() // Never returns.
+}
+
+func beforefork() {
+ gp := getg().m.curg
+
+ // Fork can hang if preempted with signals frequently enough (see issue 5517).
+ // Ensure that we stay on the same M where we disable profiling.
+ gp.m.locks++
+ if gp.m.profilehz != 0 {
+ resetcpuprofiler(0)
+ }
+
+ // This function is called before fork in the syscall package.
+ // Code between fork and exec must not allocate memory or even try to grow the stack.
+ // Here we spoil g->stackguard0 to reliably detect any attempts to grow the stack.
+ // runtime_AfterFork will undo this in the parent process, but not in the child.
+ gp.stackguard0 = stackFork
+}
+
+// Called from syscall package before fork.
+//go:nosplit
+func syscall_BeforeFork() {
+ systemstack(beforefork)
+}
+
+func afterfork() {
+ gp := getg().m.curg
+
+ // See the comment in beforefork.
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+
+ hz := sched.profilehz
+ if hz != 0 {
+ resetcpuprofiler(hz)
+ }
+ gp.m.locks--
+}
+
+// Called from syscall package after fork in parent.
+//go:nosplit
+func syscall_AfterFork() {
+ systemstack(afterfork)
+}
+
+// Allocate a new g, with a stack big enough for stacksize bytes.
+func malg(stacksize int32) *g {
+ newg := allocg()
+ if stacksize >= 0 {
+ stacksize = round2(_StackSystem + stacksize)
+ systemstack(func() {
+ newg.stack = stackalloc(uint32(stacksize))
+ })
+ newg.stackguard0 = newg.stack.lo + _StackGuard
+ newg.stackguard1 = ^uintptr(0)
+ }
+ return newg
+}
+
+// Create a new g running fn with siz bytes of arguments.
+// Put it on the queue of g's waiting to run.
+// The compiler turns a go statement into a call to this.
+// Cannot split the stack because it assumes that the arguments
+// are available sequentially after &fn; they would not be
+// copied if a stack split occurred.
+//go:nosplit
+func newproc(siz int32, fn *funcval) {
+ argp := add(unsafe.Pointer(&fn), ptrSize)
+ if hasLinkRegister {
+ argp = add(argp, ptrSize) // skip caller's saved LR
+ }
+
+ pc := getcallerpc(unsafe.Pointer(&siz))
+ systemstack(func() {
+ newproc1(fn, (*uint8)(argp), siz, 0, pc)
+ })
+}
+
+// Create a new g running fn with narg bytes of arguments starting
+// at argp and returning nret bytes of results. callerpc is the
+// address of the go statement that created this. The new g is put
+// on the queue of g's waiting to run.
+func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
+ _g_ := getg()
+
+ if fn == nil {
+ _g_.m.throwing = -1 // do not dump full stacks
+ gothrow("go of nil func value")
+ }
+ _g_.m.locks++ // disable preemption because it can be holding p in a local var
+ siz := narg + nret
+ siz = (siz + 7) &^ 7
+
+ // We could allocate a larger initial stack if necessary.
+ // Not worth it: this is almost always an error.
+ // 4*sizeof(uintreg): extra space added below
+ // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
+ if siz >= _StackMin-4*regSize-regSize {
+ gothrow("newproc: function arguments too large for new goroutine")
+ }
+
+ _p_ := _g_.m.p
+ newg := gfget(_p_)
+ if newg == nil {
+ newg = malg(_StackMin)
+ casgstatus(newg, _Gidle, _Gdead)
+ allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
+ }
+ if newg.stack.hi == 0 {
+ gothrow("newproc1: newg missing stack")
+ }
+
+ if readgstatus(newg) != _Gdead {
+ gothrow("newproc1: new g is not Gdead")
+ }
+
+ sp := newg.stack.hi
+ sp -= 4 * regSize // extra space in case of reads slightly beyond frame
+ sp -= uintptr(siz)
+ memmove(unsafe.Pointer(sp), unsafe.Pointer(argp), uintptr(narg))
+ if hasLinkRegister {
+ // caller's LR
+ sp -= ptrSize
+ *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
+ }
+
+ memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
+ newg.sched.sp = sp
+ newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function
+ newg.sched.g = newg
+ gostartcallfn(&newg.sched, fn)
+ newg.gopc = callerpc
+ casgstatus(newg, _Gdead, _Grunnable)
+
+ if _p_.goidcache == _p_.goidcacheend {
+ // Sched.goidgen is the last allocated id,
+ // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
+ // At startup sched.goidgen=0, so main goroutine receives goid=1.
+ _p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch)
+ _p_.goidcache -= _GoidCacheBatch - 1
+ _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
+ }
+ newg.goid = int64(_p_.goidcache)
+ _p_.goidcache++
+ if raceenabled {
+ newg.racectx = racegostart(callerpc)
+ }
+ runqput(_p_, newg)
+
+ if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
+ wakep()
+ }
+ _g_.m.locks--
+ if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ }
+ return newg
+}
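
As the newproc comment above notes, the arguments of a go statement are copied onto the new goroutine's stack at creation time. A small runnable illustration of that user-visible consequence: the value passed explicitly is captured at the go statement, and later writes to the original variable are not seen.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	x := 1
	wg.Add(1)
	go func(v int) { // v is copied when the goroutine is created
		defer wg.Done()
		fmt.Println(v) // prints 1, not 2
	}(x)
	x = 2
	wg.Wait()
}
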
+
+// Put on gfree list.
+// If local list is too long, transfer a batch to the global list.
+func gfput(_p_ *p, gp *g) {
+ if readgstatus(gp) != _Gdead {
+ gothrow("gfput: bad status (not Gdead)")
+ }
+
+ stksize := gp.stack.hi - gp.stack.lo
+
+ if stksize != _FixedStack {
+ // non-standard stack size - free it.
+ stackfree(gp.stack)
+ gp.stack.lo = 0
+ gp.stack.hi = 0
+ gp.stackguard0 = 0
+ }
+
+ gp.schedlink = _p_.gfree
+ _p_.gfree = gp
+ _p_.gfreecnt++
+ if _p_.gfreecnt >= 64 {
+ lock(&sched.gflock)
+ for _p_.gfreecnt >= 32 {
+ _p_.gfreecnt--
+ gp = _p_.gfree
+ _p_.gfree = gp.schedlink
+ gp.schedlink = sched.gfree
+ sched.gfree = gp
+ sched.ngfree++
+ }
+ unlock(&sched.gflock)
+ }
+}
+
+// Get from gfree list.
+// If local list is empty, grab a batch from global list.
+func gfget(_p_ *p) *g {
+retry:
+ gp := _p_.gfree
+ if gp == nil && sched.gfree != nil {
+ lock(&sched.gflock)
+ for _p_.gfreecnt < 32 && sched.gfree != nil {
+ _p_.gfreecnt++
+ gp = sched.gfree
+ sched.gfree = gp.schedlink
+ sched.ngfree--
+ gp.schedlink = _p_.gfree
+ _p_.gfree = gp
+ }
+ unlock(&sched.gflock)
+ goto retry
+ }
+ if gp != nil {
+ _p_.gfree = gp.schedlink
+ _p_.gfreecnt--
+ if gp.stack.lo == 0 {
+ // Stack was deallocated in gfput. Allocate a new one.
+ systemstack(func() {
+ gp.stack = stackalloc(_FixedStack)
+ })
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ } else {
+ if raceenabled {
+ racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+ }
+ }
+ }
+ return gp
+}
+
+// Purge all cached G's from gfree list to the global list.
+func gfpurge(_p_ *p) {
+ lock(&sched.gflock)
+ for _p_.gfreecnt != 0 {
+ _p_.gfreecnt--
+ gp := _p_.gfree
+ _p_.gfree = gp.schedlink
+ gp.schedlink = sched.gfree
+ sched.gfree = gp
+ sched.ngfree++
+ }
+ unlock(&sched.gflock)
+}
+
+// Breakpoint executes a breakpoint trap.
+func Breakpoint() {
+ breakpoint()
+}
+
+// dolockOSThread is called by LockOSThread and lockOSThread below
+// after they modify m.locked. Do not allow preemption during this call,
+// or else the m might be different in this function than in the caller.
+//go:nosplit
+func dolockOSThread() {
+ _g_ := getg()
+ _g_.m.lockedg = _g_
+ _g_.lockedm = _g_.m
+}
+
+//go:nosplit
+
+// LockOSThread wires the calling goroutine to its current operating system thread.
+// Until the calling goroutine exits or calls UnlockOSThread, it will always
+// execute in that thread, and no other goroutine can.
+func LockOSThread() {
+ getg().m.locked |= _LockExternal
+ dolockOSThread()
+}
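
A typical use of LockOSThread is code that depends on per-thread state, for example a C library with thread-local storage or an OS facility that must always be driven from the same thread. A minimal sketch:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		// Everything here runs on one OS thread, e.g. calls into a
		// C library that keeps thread-local state.
		fmt.Println("wired to a single OS thread")
		close(done)
	}()
	<-done
}
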
+
+//go:nosplit
+func lockOSThread() {
+ getg().m.locked += _LockInternal
+ dolockOSThread()
+}
+
+// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
+// after they update m->locked. Do not allow preemption during this call,
+// or else the m might be different in this function than in the caller.
+//go:nosplit
+func dounlockOSThread() {
+ _g_ := getg()
+ if _g_.m.locked != 0 {
+ return
+ }
+ _g_.m.lockedg = nil
+ _g_.lockedm = nil
+}
+
+//go:nosplit
+
+// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
+// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
+func UnlockOSThread() {
+ getg().m.locked &^= _LockExternal
+ dounlockOSThread()
+}
+
+//go:nosplit
+func unlockOSThread() {
+ _g_ := getg()
+ if _g_.m.locked < _LockInternal {
+ systemstack(badunlockosthread)
+ }
+ _g_.m.locked -= _LockInternal
+ dounlockOSThread()
+}
+
+func badunlockosthread() {
+ gothrow("runtime: internal error: misuse of lockOSThread/unlockOSThread")
+}
+
+func gcount() int32 {
+ n := int32(allglen) - sched.ngfree
+ for i := 0; ; i++ {
+ _p_ := allp[i]
+ if _p_ == nil {
+ break
+ }
+ n -= _p_.gfreecnt
+ }
+
+ // All these variables can be changed concurrently, so the result can be inconsistent.
+ // But at least the current goroutine is running.
+ if n < 1 {
+ n = 1
+ }
+ return n
+}
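
gcount is the counter behind the exported runtime.NumGoroutine. For example:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	block := make(chan struct{})
	for i := 0; i < 3; i++ {
		go func() { <-block }() // three goroutines parked on a channel
	}
	fmt.Println("goroutines:", runtime.NumGoroutine()) // main plus the three above
	close(block)
}
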
+
+func mcount() int32 {
+ return sched.mcount
+}
+
+var prof struct {
+ lock uint32
+ hz int32
+}
+
+func _System() { _System() }
+func _ExternalCode() { _ExternalCode() }
+func _GC() { _GC() }
+
+var etext struct{}
+
+// Called if we receive a SIGPROF signal.
+func sigprof(pc *uint8, sp *uint8, lr *uint8, gp *g, mp *m) {
+ var n int32
+ var traceback bool
+ var stk [100]uintptr
+
+ if prof.hz == 0 {
+ return
+ }
+
+ // Profiling runs concurrently with GC, so it must not allocate.
+ mp.mallocing++
+
+ // Define that a "user g" is a user-created goroutine, and a "system g"
+ // is one that is m->g0 or m->gsignal. We've only made sure that we
+ // can unwind user g's, so exclude the system g's.
+ //
+ // It is not quite as easy as testing gp == m->curg (the current user g)
+ // because we might be interrupted for profiling halfway through a
+ // goroutine switch. The switch involves updating three (or four) values:
+ // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
+ // because once it gets updated the new g is running.
+ //
+ // When switching from a user g to a system g, LR is not considered live,
+ // so the update only affects g, SP, and PC. Since PC must be updated last,
+ // the possible partial transitions in ordinary execution are (1) g alone is updated,
+ // (2) both g and SP are updated, and (3) SP alone is updated.
+ // If g is updated, we'll see a system g and not look closer.
+ // If SP alone is updated, we can detect the partial transition by checking
+ // whether the SP is within g's stack bounds. (We could also require that SP
+ // be changed only after g, but the stack bounds check is needed by other
+ // cases, so there is no need to impose an additional requirement.)
+ //
+ // There is one exceptional transition to a system g, not in ordinary execution.
+ // When a signal arrives, the operating system starts the signal handler running
+ // with an updated PC and SP. The g is updated last, at the beginning of the
+ // handler. There are two reasons this is okay. First, until g is updated the
+ // g and SP do not match, so the stack bounds check detects the partial transition.
+ // Second, signal handlers currently run with signals disabled, so a profiling
+ // signal cannot arrive during the handler.
+ //
+ // When switching from a system g to a user g, there are three possibilities.
+ //
+ // First, it may be that the g switch has no PC update, because the SP
+ // either corresponds to a user g throughout (as in asmcgocall)
+ // or because it has been arranged to look like a user g frame
+ // (as in cgocallback_gofunc). In this case, since the entire
+ // transition is a g+SP update, a partial transition updating just one of
+ // those will be detected by the stack bounds check.
+ //
+ // Second, when returning from a signal handler, the PC and SP updates
+ // are performed by the operating system in an atomic update, so the g
+ // update must be done before them. The stack bounds check detects
+ // the partial transition here, and (again) signal handlers run with signals
+ // disabled, so a profiling signal cannot arrive then anyway.
+ //
+ // Third, the common case: it may be that the switch updates g, SP, and PC
+ // separately, as in gogo.
+ //
+ // Because gogo is the only instance, we check whether the PC lies
+ // within that function, and if so, do not ask for a traceback. This approach
+ // requires knowing the size of the gogo function, which we
+ // record in arch_*.h and check in runtime_test.go.
+ //
+ // There is another apparently viable approach, recorded here in case
+ // the "PC within gogo" check turns out not to be usable.
+ // It would be possible to delay the update of either g or SP until immediately
+ // before the PC update instruction. Then, because of the stack bounds check,
+ // the only problematic interrupt point is just before that PC update instruction,
+ // and the sigprof handler can detect that instruction and simulate stepping past
+ // it in order to reach a consistent state. On ARM, the update of g must be made
+ // in two places (in R10 and also in a TLS slot), so the delayed update would
+ // need to be the SP update. The sigprof handler must read the instruction at
+ // the current PC and if it was the known instruction (for example, JMP BX or
+ // MOV R2, PC), use that other register in place of the PC value.
+ // The biggest drawback to this solution is that it requires that we can tell
+ // whether it's safe to read from the memory pointed at by PC.
+ // In a correct program, we can test PC == nil and otherwise read,
+ // but if a profiling signal happens at the instant that a program executes
+ // a bad jump (before the program manages to handle the resulting fault)
+ // the profiling handler could fault trying to read nonexistent memory.
+ //
+ // To recap, there are no constraints on the assembly being used for the
+ // transition. We simply require that g and SP match and that the PC is not
+ // in gogo.
+ traceback = true
+ usp := uintptr(unsafe.Pointer(sp))
+ gogo := funcPC(gogo)
+ if gp == nil || gp != mp.curg ||
+ usp < gp.stack.lo || gp.stack.hi < usp ||
+ (gogo <= uintptr(unsafe.Pointer(pc)) && uintptr(unsafe.Pointer(pc)) < gogo+_RuntimeGogoBytes) {
+ traceback = false
+ }
+
+ n = 0
+ if traceback {
+ n = int32(gentraceback(uintptr(unsafe.Pointer(pc)), uintptr(unsafe.Pointer(sp)), uintptr(unsafe.Pointer(lr)), gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap))
+ }
+ if !traceback || n <= 0 {
+ // Normal traceback is impossible or has failed.
+ // See if it falls into one of several common cases.
+ n = 0
+ if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
+ // Cgo, we can't unwind and symbolize arbitrary C code,
+ // so instead collect Go stack that leads to the cgo call.
+ // This is especially important on windows, since all syscalls are cgo calls.
+ n = int32(gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0))
+ }
+ if GOOS == "windows" && n == 0 && mp.libcallg != nil && mp.libcallpc != 0 && mp.libcallsp != 0 {
+ // Libcall, i.e. runtime syscall on windows.
+ // Collect Go stack that leads to the call.
+ n = int32(gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg, 0, &stk[0], len(stk), nil, nil, 0))
+ }
+ if n == 0 {
+ // If all of the above has failed, account it against abstract "System" or "GC".
+ n = 2
+ // "ExternalCode" is better than "etext".
+ if uintptr(unsafe.Pointer(pc)) > uintptr(unsafe.Pointer(&etext)) {
+ pc = (*uint8)(unsafe.Pointer(uintptr(funcPC(_ExternalCode) + _PCQuantum)))
+ }
+ stk[0] = uintptr(unsafe.Pointer(pc))
+ if mp.gcing != 0 || mp.helpgc != 0 {
+ stk[1] = funcPC(_GC) + _PCQuantum
+ } else {
+ stk[1] = funcPC(_System) + _PCQuantum
+ }
+ }
+ }
+
+ if prof.hz != 0 {
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ for !cas(&prof.lock, 0, 1) {
+ osyield()
+ }
+ if prof.hz != 0 {
+ cpuproftick(&stk[0], n)
+ }
+ atomicstore(&prof.lock, 0)
+ }
+ mp.mallocing--
+}
+
+// Arrange to call fn with a traceback hz times a second.
+func setcpuprofilerate_m(hz int32) {
+ // Force sane arguments.
+ if hz < 0 {
+ hz = 0
+ }
+
+ // Disable preemption, otherwise we can be rescheduled to another thread
+ // that has profiling enabled.
+ _g_ := getg()
+ _g_.m.locks++
+
+ // Stop profiler on this thread so that it is safe to lock prof.
+ // if a profiling signal came in while we had prof locked,
+ // it would deadlock.
+ resetcpuprofiler(0)
+
+ for !cas(&prof.lock, 0, 1) {
+ osyield()
+ }
+ prof.hz = hz
+ atomicstore(&prof.lock, 0)
+
+ lock(&sched.lock)
+ sched.profilehz = hz
+ unlock(&sched.lock)
+
+ if hz != 0 {
+ resetcpuprofiler(hz)
+ }
+
+ _g_.m.locks--
+}
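
setcpuprofilerate_m is reached from the user-facing profiling API; the usual way to switch the SIGPROF-driven sampler on and off from Go code is runtime/pprof (the output file name below is arbitrary):

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil { // turns SIGPROF sampling on
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile() // sets the rate back to 0

	// ... workload to be profiled ...
}
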
+
+// Change number of processors. The world is stopped, sched is locked.
+// gcworkbufs are not being modified by either the GC or
+// the write barrier code.
+func procresize(new int32) {
+ old := gomaxprocs
+ if old < 0 || old > _MaxGomaxprocs || new <= 0 || new > _MaxGomaxprocs {
+ gothrow("procresize: invalid arg")
+ }
+
+ // initialize new P's
+ for i := int32(0); i < new; i++ {
+ p := allp[i]
+ if p == nil {
+ p = newP()
+ p.id = i
+ p.status = _Pgcstop
+ atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(p))
+ }
+ if p.mcache == nil {
+ if old == 0 && i == 0 {
+ if getg().m.mcache == nil {
+ gothrow("missing mcache?")
+ }
+ p.mcache = getg().m.mcache // bootstrap
+ } else {
+ p.mcache = allocmcache()
+ }
+ }
+ }
+
+ // redistribute runnable G's evenly
+ // collect all runnable goroutines in global queue preserving FIFO order
+ // FIFO order is required to ensure fairness even during frequent GCs
+ // see http://golang.org/issue/7126
+ empty := false
+ for !empty {
+ empty = true
+ for i := int32(0); i < old; i++ {
+ p := allp[i]
+ if p.runqhead == p.runqtail {
+ continue
+ }
+ empty = false
+ // pop from tail of local queue
+ p.runqtail--
+ gp := p.runq[p.runqtail%uint32(len(p.runq))]
+ // push onto head of global queue
+ gp.schedlink = sched.runqhead
+ sched.runqhead = gp
+ if sched.runqtail == nil {
+ sched.runqtail = gp
+ }
+ sched.runqsize++
+ }
+ }
+
+ // fill local queues with at most len(p.runq)/2 goroutines
+ // start at 1 because current M already executes some G and will acquire allp[0] below,
+ // so if we have a spare G we want to put it into allp[1].
+ var _p_ p
+ for i := int32(1); i < new*int32(len(_p_.runq))/2 && sched.runqsize > 0; i++ {
+ gp := sched.runqhead
+ sched.runqhead = gp.schedlink
+ if sched.runqhead == nil {
+ sched.runqtail = nil
+ }
+ sched.runqsize--
+ runqput(allp[i%new], gp)
+ }
+
+ // free unused P's
+ for i := new; i < old; i++ {
+ p := allp[i]
+ freemcache(p.mcache)
+ p.mcache = nil
+ gfpurge(p)
+ p.status = _Pdead
+ // can't free P itself because it can be referenced by an M in syscall
+ }
+
+ _g_ := getg()
+ if _g_.m.p != nil {
+ _g_.m.p.m = nil
+ }
+ _g_.m.p = nil
+ _g_.m.mcache = nil
+ p := allp[0]
+ p.m = nil
+ p.status = _Pidle
+ acquirep(p)
+ for i := new - 1; i > 0; i-- {
+ p := allp[i]
+ p.status = _Pidle
+ pidleput(p)
+ }
+ var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
+ atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(new))
+}
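
procresize is the machinery behind runtime.GOMAXPROCS: changing the value stops the world, resizes the set of P's, and redistributes runnable goroutines as above. From user code:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	prev := runtime.GOMAXPROCS(2) // resize to 2 P's; returns the previous setting
	fmt.Println("previous GOMAXPROCS:", prev)
	fmt.Println("current GOMAXPROCS:", runtime.GOMAXPROCS(0)) // 0 just queries
}
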
+
+// Associate p and the current m.
+func acquirep(_p_ *p) {
+ _g_ := getg()
+
+ if _g_.m.p != nil || _g_.m.mcache != nil {
+ gothrow("acquirep: already in go")
+ }
+ if _p_.m != nil || _p_.status != _Pidle {
+ id := int32(0)
+ if _p_.m != nil {
+ id = _p_.m.id
+ }
+ print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
+ gothrow("acquirep: invalid p state")
+ }
+ _g_.m.mcache = _p_.mcache
+ _g_.m.p = _p_
+ _p_.m = _g_.m
+ _p_.status = _Prunning
+}
+
+// Disassociate p and the current m.
+func releasep() *p {
+ _g_ := getg()
+
+ if _g_.m.p == nil || _g_.m.mcache == nil {
+ gothrow("releasep: invalid arg")
+ }
+ _p_ := _g_.m.p
+ if _p_.m != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
+ print("releasep: m=", _g_.m, " m->p=", _g_.m.p, " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
+ gothrow("releasep: invalid p state")
+ }
+ _g_.m.p = nil
+ _g_.m.mcache = nil
+ _p_.m = nil
+ _p_.status = _Pidle
+ return _p_
+}
+
+func incidlelocked(v int32) {
+ lock(&sched.lock)
+ sched.nmidlelocked += v
+ if v > 0 {
+ checkdead()
+ }
+ unlock(&sched.lock)
+}
+
+// Check for deadlock situation.
+// The check is based on the number of running M's; if it is 0, the program is deadlocked.
+func checkdead() {
+ // If we are dying because of a signal caught on an already idle thread,
+ // freezetheworld will cause all running threads to block.
+ // And runtime will essentially enter into deadlock state,
+ // except that there is a thread that will call exit soon.
+ if panicking > 0 {
+ return
+ }
+
+ // -1 for sysmon
+ run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
+ if run > 0 {
+ return
+ }
+ if run < 0 {
+ print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
+ gothrow("checkdead: inconsistent counts")
+ }
+
+ grunning := 0
+ lock(&allglock)
+ for i := 0; i < len(allgs); i++ {
+ gp := allgs[i]
+ if gp.issystem {
+ continue
+ }
+ s := readgstatus(gp)
+ switch s &^ _Gscan {
+ case _Gwaiting:
+ grunning++
+ case _Grunnable,
+ _Grunning,
+ _Gsyscall:
+ unlock(&allglock)
+ print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
+ gothrow("checkdead: runnable g")
+ }
+ }
+ unlock(&allglock)
+ if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
+ gothrow("no goroutines (main called runtime.Goexit) - deadlock!")
+ }
+
+ // Maybe jump time forward for playground.
+ gp := timejump()
+ if gp != nil {
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ globrunqput(gp)
+ _p_ := pidleget()
+ if _p_ == nil {
+ gothrow("checkdead: no p for timer")
+ }
+ mp := mget()
+ if mp == nil {
+ _newm(nil, _p_)
+ } else {
+ mp.nextp = _p_
+ notewakeup(&mp.park)
+ }
+ return
+ }
+
+ getg().m.throwing = -1 // do not dump full stacks
+ gothrow("all goroutines are asleep - deadlock!")
+}
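
The final throw in checkdead is the source of the familiar fatal error. A minimal program that triggers it, because the only goroutine blocks on a channel that can never be sent to:

package main

func main() {
	ch := make(chan int)
	<-ch // no sender exists; the runtime reports
	//   "fatal error: all goroutines are asleep - deadlock!"
}
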
+
+func sysmon() {
+ // If we go two minutes without a garbage collection, force one to run.
+ forcegcperiod := int64(2 * 60 * 1e9)
+
+ // If a heap span goes unused for 5 minutes after a garbage collection,
+ // we hand it back to the operating system.
+ scavengelimit := int64(5 * 60 * 1e9)
+
+ if debug.scavenge > 0 {
+ // Scavenge-a-lot for testing.
+ forcegcperiod = 10 * 1e6
+ scavengelimit = 20 * 1e6
+ }
+
+ lastscavenge := nanotime()
+ nscavenge := 0
+
+ // Make wake-up period small enough for the sampling to be correct.
+ maxsleep := forcegcperiod / 2
+ if scavengelimit < forcegcperiod {
+ maxsleep = scavengelimit / 2
+ }
+
+ lasttrace := int64(0)
+ idle := 0 // how many cycles in succession we have not woken anybody up
+ delay := uint32(0)
+ for {
+ if idle == 0 { // start with 20us sleep...
+ delay = 20
+ } else if idle > 50 { // start doubling the sleep after 1ms...
+ delay *= 2
+ }
+ if delay > 10*1000 { // up to 10ms
+ delay = 10 * 1000
+ }
+ usleep(delay)
+ if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
+ lock(&sched.lock)
+ if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) {
+ atomicstore(&sched.sysmonwait, 1)
+ unlock(&sched.lock)
+ notetsleep(&sched.sysmonnote, maxsleep)
+ lock(&sched.lock)
+ atomicstore(&sched.sysmonwait, 0)
+ noteclear(&sched.sysmonnote)
+ idle = 0
+ delay = 20
+ }
+ unlock(&sched.lock)
+ }
+ // poll network if not polled for more than 10ms
+ lastpoll := int64(atomicload64(&sched.lastpoll))
+ now := nanotime()
+ unixnow := unixnanotime()
+ if lastpoll != 0 && lastpoll+10*1000*1000 < now {
+ cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
+ gp := netpoll(false) // non-blocking - returns list of goroutines
+ if gp != nil {
+ // Need to decrement number of idle locked M's
+ // (pretending that one more is running) before injectglist.
+ // Otherwise it can lead to the following situation:
+ // injectglist grabs all P's but before it starts M's to run the P's,
+ // another M returns from syscall, finishes running its G,
+ // observes that there is no work to do and no other running M's
+ // and reports deadlock.
+ incidlelocked(-1)
+ injectglist(gp)
+ incidlelocked(1)
+ }
+ }
+ // retake P's blocked in syscalls
+ // and preempt long running G's
+ if retake(now) != 0 {
+ idle = 0
+ } else {
+ idle++
+ }
+ // check if we need to force a GC
+ lastgc := int64(atomicload64(&memstats.last_gc))
+ if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 {
+ lock(&forcegc.lock)
+ forcegc.idle = 0
+ forcegc.g.schedlink = nil
+ injectglist(forcegc.g)
+ unlock(&forcegc.lock)
+ }
+ // scavenge heap once in a while
+ if lastscavenge+scavengelimit/2 < now {
+ mHeap_Scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
+ lastscavenge = now
+ nscavenge++
+ }
+ if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now {
+ lasttrace = now
+ schedtrace(debug.scheddetail > 0)
+ }
+ }
+}
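
The forced-GC check in sysmon compares memstats.last_gc against a two-minute period; the same timestamp is visible to programs as runtime.MemStats.LastGC. A small sketch that reports how long ago the last collection finished:

package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	runtime.GC() // run a collection so the timestamp is set
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	last := time.Unix(0, int64(ms.LastGC)) // LastGC is nanoseconds since 1970
	fmt.Println("last GC finished", time.Since(last), "ago")
}
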
+
+var pdesc [_MaxGomaxprocs]struct {
+ schedtick uint32
+ schedwhen int64
+ syscalltick uint32
+ syscallwhen int64
+}
+
+func retake(now int64) uint32 {
+ n := 0
+ for i := int32(0); i < gomaxprocs; i++ {
+ _p_ := allp[i]
+ if _p_ == nil {
+ continue
+ }
+ pd := &pdesc[i]
+ s := _p_.status
+ if s == _Psyscall {
+ // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
+ t := int64(_p_.syscalltick)
+ if int64(pd.syscalltick) != t {
+ pd.syscalltick = uint32(t)
+ pd.syscallwhen = now
+ continue
+ }
+ // On the one hand we don't want to retake Ps if there is no other work to do,
+ // but on the other hand we want to retake them eventually
+ // because they can prevent the sysmon thread from deep sleep.
+ if _p_.runqhead == _p_.runqtail && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+ continue
+ }
+ // Need to decrement number of idle locked M's
+ // (pretending that one more is running) before the CAS.
+ // Otherwise the M from which we retake can exit the syscall,
+ // increment nmidle and report deadlock.
+ incidlelocked(-1)
+ if cas(&_p_.status, s, _Pidle) {
+ n++
+ handoffp(_p_)
+ }
+ incidlelocked(1)
+ } else if s == _Prunning {
+ // Preempt G if it's running for more than 10ms.
+ t := int64(_p_.schedtick)
+ if int64(pd.schedtick) != t {
+ pd.schedtick = uint32(t)
+ pd.schedwhen = now
+ continue
+ }
+ if pd.schedwhen+10*1000*1000 > now {
+ continue
+ }
+ preemptone(_p_)
+ }
+ }
+ return uint32(n)
+}
+
+// Tell all goroutines that they have been preempted and they should stop.
+// This function is purely best-effort. It can fail to inform a goroutine if a
+// processor just started running it.
+// No locks need to be held.
+// Returns true if preemption request was issued to at least one goroutine.
+func preemptall() bool {
+ res := false
+ for i := int32(0); i < gomaxprocs; i++ {
+ _p_ := allp[i]
+ if _p_ == nil || _p_.status != _Prunning {
+ continue
+ }
+ if preemptone(_p_) {
+ res = true
+ }
+ }
+ return res
+}
+
+// Tell the goroutine running on processor P to stop.
+// This function is purely best-effort. It can incorrectly fail to inform the
+// goroutine. It can inform the wrong goroutine. Even if it informs the
+// correct goroutine, that goroutine might ignore the request if it is
+// simultaneously executing newstack.
+// No lock needs to be held.
+// Returns true if preemption request was issued.
+// The actual preemption will happen at some point in the future
+// and will be indicated by gp->status no longer being
+// Grunning.
+func preemptone(_p_ *p) bool {
+ mp := _p_.m
+ if mp == nil || mp == getg().m {
+ return false
+ }
+ gp := mp.curg
+ if gp == nil || gp == mp.g0 {
+ return false
+ }
+
+ gp.preempt = true
+
+ // Every call in a goroutine checks for stack overflow by
+ // comparing the current stack pointer to gp->stackguard0.
+ // Setting gp->stackguard0 to StackPreempt folds
+ // preemption into the normal stack overflow check.
+ gp.stackguard0 = stackPreempt
+ return true
+}
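
Because the preemption request is folded into the stack-overflow check, a goroutine only notices it at its next stack check, in practice at a function call. Under this cooperative model a loop that makes no calls is never preempted, so long-running loops sometimes yield explicitly. A sketch of that pattern (spin is a hypothetical helper):

package main

import "runtime"

func spin(stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
			// Explicitly yield. Under the cooperative model sketched above,
			// a loop with no function calls would never reach a preemption check.
			runtime.Gosched()
		}
	}
}

func main() {
	stop := make(chan struct{})
	go spin(stop)
	close(stop)
}
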
+
+var starttime int64
+
+func schedtrace(detailed bool) {
+ now := nanotime()
+ if starttime == 0 {
+ starttime = now
+ }
+
+ lock(&sched.lock)
+ print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
+ if detailed {
+ print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
+ }
+ // We must be careful while reading data from P's, M's and G's.
+ // Even if we hold schedlock, most data can be changed concurrently.
+ // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
+ for i := int32(0); i < gomaxprocs; i++ {
+ _p_ := allp[i]
+ if _p_ == nil {
+ continue
+ }
+ mp := _p_.m
+ h := atomicload(&_p_.runqhead)
+ t := atomicload(&_p_.runqtail)
+ if detailed {
+ id := int32(-1)
+ if mp != nil {
+ id = mp.id
+ }
+ print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
+ } else {
+ // In non-detailed mode, format the lengths of the per-P run queues as:
+ // [len1 len2 len3 len4]
+ print(" ")
+ if i == 0 {
+ print("[")
+ }
+ print(t - h)
+ if i == gomaxprocs-1 {
+ print("]\n")
+ }
+ }
+ }
+
+ if !detailed {
+ unlock(&sched.lock)
+ return
+ }
+
+ for mp := allm; mp != nil; mp = mp.alllink {
+ _p_ := mp.p
+ gp := mp.curg
+ lockedg := mp.lockedg
+ id1 := int32(-1)
+ if _p_ != nil {
+ id1 = _p_.id
+ }
+ id2 := int64(-1)
+ if gp != nil {
+ id2 = gp.goid
+ }
+ id3 := int64(-1)
+ if lockedg != nil {
+ id3 = lockedg.goid
+ }
+ print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " gcing=", mp.gcing, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n")
+ }
+
+ lock(&allglock)
+ for gi := 0; gi < len(allgs); gi++ {
+ gp := allgs[gi]
+ mp := gp.m
+ lockedm := gp.lockedm
+ id1 := int32(-1)
+ if mp != nil {
+ id1 = mp.id
+ }
+ id2 := int32(-1)
+ if lockedm != nil {
+ id2 = lockedm.id
+ }
+ print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
+ }
+ unlock(&allglock)
+ unlock(&sched.lock)
+}
+
+// Put mp on midle list.
+// Sched must be locked.
+func mput(mp *m) {
+ mp.schedlink = sched.midle
+ sched.midle = mp
+ sched.nmidle++
+ checkdead()
+}
+
+// Try to get an m from midle list.
+// Sched must be locked.
+func mget() *m {
+ mp := sched.midle
+ if mp != nil {
+ sched.midle = mp.schedlink
+ sched.nmidle--
+ }
+ return mp
+}
+
+// Put gp on the global runnable queue.
+// Sched must be locked.
+func globrunqput(gp *g) {
+ gp.schedlink = nil
+ if sched.runqtail != nil {
+ sched.runqtail.schedlink = gp
+ } else {
+ sched.runqhead = gp
+ }
+ sched.runqtail = gp
+ sched.runqsize++
+}
+
+// Put a batch of runnable goroutines on the global runnable queue.
+// Sched must be locked.
+func globrunqputbatch(ghead *g, gtail *g, n int32) {
+ gtail.schedlink = nil
+ if sched.runqtail != nil {
+ sched.runqtail.schedlink = ghead
+ } else {
+ sched.runqhead = ghead
+ }
+ sched.runqtail = gtail
+ sched.runqsize += n
+}
+
+// Try to get a batch of G's from the global runnable queue.
+// Sched must be locked.
+func globrunqget(_p_ *p, max int32) *g {
+ if sched.runqsize == 0 {
+ return nil
+ }
+
+ n := sched.runqsize/gomaxprocs + 1
+ if n > sched.runqsize {
+ n = sched.runqsize
+ }
+ if max > 0 && n > max {
+ n = max
+ }
+ if n > int32(len(_p_.runq))/2 {
+ n = int32(len(_p_.runq)) / 2
+ }
+
+ sched.runqsize -= n
+ if sched.runqsize == 0 {
+ sched.runqtail = nil
+ }
+
+ gp := sched.runqhead
+ sched.runqhead = gp.schedlink
+ n--
+ for ; n > 0; n-- {
+ gp1 := sched.runqhead
+ sched.runqhead = gp1.schedlink
+ runqput(_p_, gp1)
+ }
+ return gp
+}
+
+// Put p on the _Pidle list.
+// Sched must be locked.
+func pidleput(_p_ *p) {
+ _p_.link = sched.pidle
+ sched.pidle = _p_
+ xadd(&sched.npidle, 1) // TODO: fast atomic
+}
+
+// Try to get a p from the _Pidle list.
+// Sched must be locked.
+func pidleget() *p {
+ _p_ := sched.pidle
+ if _p_ != nil {
+ sched.pidle = _p_.link
+ xadd(&sched.npidle, -1) // TODO: fast atomic
+ }
+ return _p_
+}
+
+// Try to put g on local runnable queue.
+// If it's full, put onto global queue.
+// Executed only by the owner P.
+func runqput(_p_ *p, gp *g) {
+retry:
+ h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
+ t := _p_.runqtail
+ if t-h < uint32(len(_p_.runq)) {
+ _p_.runq[t%uint32(len(_p_.runq))] = gp
+ atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ return
+ }
+ if runqputslow(_p_, gp, h, t) {
+ return
+ }
+ // the queue is not full, now the put above must succeed
+ goto retry
+}
+
+// Put g and a batch of work from local runnable queue on global queue.
+// Executed only by the owner P.
+func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
+ var batch [len(_p_.runq)/2 + 1]*g
+
+ // First, grab a batch from local queue.
+ n := t - h
+ n = n / 2
+ if n != uint32(len(_p_.runq)/2) {
+ gothrow("runqputslow: queue is not full")
+ }
+ for i := uint32(0); i < n; i++ {
+ batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
+ }
+ if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ return false
+ }
+ batch[n] = gp
+
+ // Link the goroutines.
+ for i := uint32(0); i < n; i++ {
+ batch[i].schedlink = batch[i+1]
+ }
+
+ // Now put the batch on global queue.
+ lock(&sched.lock)
+ globrunqputbatch(batch[0], batch[n], int32(n+1))
+ unlock(&sched.lock)
+ return true
+}
+
+// Get g from local runnable queue.
+// Executed only by the owner P.
+func runqget(_p_ *p) *g {
+ for {
+ h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := _p_.runqtail
+ if t == h {
+ return nil
+ }
+ gp := _p_.runq[h%uint32(len(_p_.runq))]
+ if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ return gp
+ }
+ }
+}
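
runqput and runqget treat p.runq as a fixed-size ring: head and tail are free-running uint32 counters and the slot index is taken modulo len(runq), so t-h is the queue length and wrap-around needs no special casing. A stripped-down, single-threaded sketch of just that indexing (the real code adds the acquire/release atomics shown above; the ring type is illustrative):

package main

import "fmt"

type ring struct {
	head, tail uint32 // free-running counters; wrap-around is harmless
	buf        [8]int
}

func (r *ring) put(v int) bool {
	if r.tail-r.head == uint32(len(r.buf)) {
		return false // full
	}
	r.buf[r.tail%uint32(len(r.buf))] = v
	r.tail++
	return true
}

func (r *ring) get() (int, bool) {
	if r.tail == r.head {
		return 0, false // empty
	}
	v := r.buf[r.head%uint32(len(r.buf))]
	r.head++
	return v, true
}

func main() {
	var r ring
	r.put(1)
	r.put(2)
	fmt.Println(r.get()) // 1 true
	fmt.Println(r.get()) // 2 true
}
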
+
+// Grabs a batch of goroutines from local runnable queue.
+// batch array must be of size len(p->runq)/2. Returns number of grabbed goroutines.
+// Can be executed by any P.
+func runqgrab(_p_ *p, batch []*g) uint32 {
+ for {
+ h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
+ t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer
+ n := t - h
+ n = n - n/2
+ if n == 0 {
+ return 0
+ }
+ if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
+ continue
+ }
+ for i := uint32(0); i < n; i++ {
+ batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
+ }
+ if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ return n
+ }
+ }
+}
+
+// Steal half of the elements from p2's local runnable queue
+// and put them onto _p_'s local runnable queue.
+// Returns one of the stolen elements (or nil if the steal failed).
+func runqsteal(_p_, p2 *p) *g {
+ var batch [len(_p_.runq) / 2]*g
+
+ n := runqgrab(p2, batch[:])
+ if n == 0 {
+ return nil
+ }
+ n--
+ gp := batch[n]
+ if n == 0 {
+ return gp
+ }
+ h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
+ t := _p_.runqtail
+ if t-h+n >= uint32(len(_p_.runq)) {
+ gothrow("runqsteal: runq overflow")
+ }
+ for i := uint32(0); i < n; i++ {
+ _p_.runq[(t+i)%uint32(len(_p_.runq))] = batch[i]
+ }
+ atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ return gp
+}
+
+func testSchedLocalQueue() {
+ _p_ := new(p)
+ gs := make([]g, len(_p_.runq))
+ for i := 0; i < len(_p_.runq); i++ {
+ if runqget(_p_) != nil {
+ gothrow("runq is not empty initially")
+ }
+ for j := 0; j < i; j++ {
+ runqput(_p_, &gs[i])
+ }
+ for j := 0; j < i; j++ {
+ if runqget(_p_) != &gs[i] {
+ print("bad element at iter ", i, "/", j, "\n")
+ gothrow("bad element")
+ }
+ }
+ if runqget(_p_) != nil {
+ gothrow("runq is not empty afterwards")
+ }
+ }
+}
+
+func testSchedLocalQueueSteal() {
+ p1 := new(p)
+ p2 := new(p)
+ gs := make([]g, len(p1.runq))
+ for i := 0; i < len(p1.runq); i++ {
+ for j := 0; j < i; j++ {
+ gs[j].sig = 0
+ runqput(p1, &gs[j])
+ }
+ gp := runqsteal(p2, p1)
+ s := 0
+ if gp != nil {
+ s++
+ gp.sig++
+ }
+ for {
+ gp = runqget(p2)
+ if gp == nil {
+ break
+ }
+ s++
+ gp.sig++
+ }
+ for {
+ gp = runqget(p1)
+ if gp == nil {
+ break
+ }
+ gp.sig++
+ }
+ for j := 0; j < i; j++ {
+ if gs[j].sig != 1 {
+ print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
+ gothrow("bad element")
+ }
+ }
+ if s != i/2 && s != i/2+1 {
+ print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
+ gothrow("bad steal")
+ }
+ }
+}
+
+func setMaxThreads(in int) (out int) {
+ lock(&sched.lock)
+ out = int(sched.maxmcount)
+ sched.maxmcount = int32(in)
+ checkmcount()
+ unlock(&sched.lock)
+ return
+}
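
setMaxThreads is exposed to programs as runtime/debug.SetMaxThreads, which installs a new limit on OS threads and returns the previous one:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	prev := debug.SetMaxThreads(20000) // raise the limit, get the old one back
	fmt.Println("previous max threads:", prev)
	debug.SetMaxThreads(prev) // restore
}
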
+
+var goexperiment string = "GOEXPERIMENT" // TODO: defined in zaexperiment.h
+
+func haveexperiment(name string) bool {
+ x := goexperiment
+ for x != "" {
+ xname := ""
+ i := index(x, ",")
+ if i < 0 {
+ xname, x = x, ""
+ } else {
+ xname, x = x[:i], x[i+1:]
+ }
+ if xname == name {
+ return true
+ }
+ }
+ return false
+}
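
haveexperiment walks the comma-separated GOEXPERIMENT string by hand because the runtime cannot import the strings package; outside the runtime the same check is a one-liner. A sketch (hasExperiment is a hypothetical helper):

package main

import (
	"fmt"
	"strings"
)

// hasExperiment reports whether name appears in a comma-separated list.
func hasExperiment(list, name string) bool {
	for _, x := range strings.Split(list, ",") {
		if x == name {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasExperiment("framepointer,foo", "foo")) // true
}
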
+
+//go:nosplit
+func sync_procPin() int {
+ _g_ := getg()
+ mp := _g_.m
+
+ mp.locks++
+ return int(mp.p.id)
+}
+
+//go:nosplit
+func sync_procUnpin() {
+ _g_ := getg()
+ _g_.m.locks--
+}
diff --git a/src/runtime/race.c b/src/runtime/race.c
deleted file mode 100644
index 9ac73fbcc..000000000
--- a/src/runtime/race.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Implementation of the race detector API.
-// +build race
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "race.h"
-#include "type.h"
-#include "typekind.h"
-#include "textflag.h"
-
-// Race runtime functions called via runtime·racecall.
-void __tsan_init(void);
-void __tsan_fini(void);
-void __tsan_map_shadow(void);
-void __tsan_finalizer_goroutine(void);
-void __tsan_go_start(void);
-void __tsan_go_end(void);
-void __tsan_malloc(void);
-void __tsan_acquire(void);
-void __tsan_release(void);
-void __tsan_release_merge(void);
-void __tsan_go_ignore_sync_begin(void);
-void __tsan_go_ignore_sync_end(void);
-
-// Mimic what cmd/cgo would do.
-#pragma cgo_import_static __tsan_init
-#pragma cgo_import_static __tsan_fini
-#pragma cgo_import_static __tsan_map_shadow
-#pragma cgo_import_static __tsan_finalizer_goroutine
-#pragma cgo_import_static __tsan_go_start
-#pragma cgo_import_static __tsan_go_end
-#pragma cgo_import_static __tsan_malloc
-#pragma cgo_import_static __tsan_acquire
-#pragma cgo_import_static __tsan_release
-#pragma cgo_import_static __tsan_release_merge
-#pragma cgo_import_static __tsan_go_ignore_sync_begin
-#pragma cgo_import_static __tsan_go_ignore_sync_end
-
-// These are called from race_amd64.s.
-#pragma cgo_import_static __tsan_read
-#pragma cgo_import_static __tsan_read_pc
-#pragma cgo_import_static __tsan_read_range
-#pragma cgo_import_static __tsan_write
-#pragma cgo_import_static __tsan_write_pc
-#pragma cgo_import_static __tsan_write_range
-#pragma cgo_import_static __tsan_func_enter
-#pragma cgo_import_static __tsan_func_exit
-
-#pragma cgo_import_static __tsan_go_atomic32_load
-#pragma cgo_import_static __tsan_go_atomic64_load
-#pragma cgo_import_static __tsan_go_atomic32_store
-#pragma cgo_import_static __tsan_go_atomic64_store
-#pragma cgo_import_static __tsan_go_atomic32_exchange
-#pragma cgo_import_static __tsan_go_atomic64_exchange
-#pragma cgo_import_static __tsan_go_atomic32_fetch_add
-#pragma cgo_import_static __tsan_go_atomic64_fetch_add
-#pragma cgo_import_static __tsan_go_atomic32_compare_exchange
-#pragma cgo_import_static __tsan_go_atomic64_compare_exchange
-
-extern byte runtime·noptrdata[];
-extern byte runtime·enoptrbss[];
-
-// start/end of heap for race_amd64.s
-uintptr runtime·racearenastart;
-uintptr runtime·racearenaend;
-
-void runtime·racefuncenter(void *callpc);
-void runtime·racefuncexit(void);
-void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc);
-void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc);
-void runtime·racesymbolizethunk(void*);
-
-// racecall allows calling an arbitrary function f from C race runtime
-// with up to 4 uintptr arguments.
-void runtime·racecall(void(*f)(void), ...);
-
-// checks if the address has shadow (i.e. heap or data/bss)
-#pragma textflag NOSPLIT
-static bool
-isvalidaddr(uintptr addr)
-{
- if(addr >= runtime·racearenastart && addr < runtime·racearenaend)
- return true;
- if(addr >= (uintptr)runtime·noptrdata && addr < (uintptr)runtime·enoptrbss)
- return true;
- return false;
-}
-
-#pragma textflag NOSPLIT
-uintptr
-runtime·raceinit(void)
-{
- uintptr racectx, start, size;
-
- // cgo is required to initialize libc, which is used by race runtime
- if(!runtime·iscgo)
- runtime·throw("raceinit: race build must use cgo");
- runtime·racecall(__tsan_init, &racectx, runtime·racesymbolizethunk);
- // Round data segment to page boundaries, because it's used in mmap().
- start = (uintptr)runtime·noptrdata & ~(PageSize-1);
- size = ROUND((uintptr)runtime·enoptrbss - start, PageSize);
- runtime·racecall(__tsan_map_shadow, start, size);
- return racectx;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racefini(void)
-{
- runtime·racecall(__tsan_fini);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racemapshadow(void *addr, uintptr size)
-{
- if(runtime·racearenastart == 0)
- runtime·racearenastart = (uintptr)addr;
- if(runtime·racearenaend < (uintptr)addr+size)
- runtime·racearenaend = (uintptr)addr+size;
- runtime·racecall(__tsan_map_shadow, addr, size);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racemalloc(void *p, uintptr sz)
-{
- runtime·racecall(__tsan_malloc, p, sz);
-}
-
-#pragma textflag NOSPLIT
-uintptr
-runtime·racegostart(void *pc)
-{
- uintptr racectx;
- G *spawng;
-
- if(g->m->curg != nil)
- spawng = g->m->curg;
- else
- spawng = g;
-
- runtime·racecall(__tsan_go_start, spawng->racectx, &racectx, pc);
- return racectx;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racegoend(void)
-{
- runtime·racecall(__tsan_go_end, g->racectx);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc)
-{
- if(g != g->m->curg) {
- // The call is coming from manual instrumentation of Go code running on g0/gsignal.
- // Not interesting.
- return;
- }
- if(callpc != nil)
- runtime·racefuncenter(callpc);
- runtime·racewriterangepc1(addr, sz, pc);
- if(callpc != nil)
- runtime·racefuncexit();
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc)
-{
- if(g != g->m->curg) {
- // The call is coming from manual instrumentation of Go code running on g0/gsignal.
- // Not interesting.
- return;
- }
- if(callpc != nil)
- runtime·racefuncenter(callpc);
- runtime·racereadrangepc1(addr, sz, pc);
- if(callpc != nil)
- runtime·racefuncexit();
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racewriteobjectpc(void *addr, Type *t, void *callpc, void *pc)
-{
- uint8 kind;
-
- kind = t->kind & KindMask;
- if(kind == KindArray || kind == KindStruct)
- runtime·racewriterangepc(addr, t->size, callpc, pc);
- else
- runtime·racewritepc(addr, callpc, pc);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racereadobjectpc(void *addr, Type *t, void *callpc, void *pc)
-{
- uint8 kind;
-
- kind = t->kind & KindMask;
- if(kind == KindArray || kind == KindStruct)
- runtime·racereadrangepc(addr, t->size, callpc, pc);
- else
- runtime·racereadpc(addr, callpc, pc);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·raceacquire(void *addr)
-{
- runtime·raceacquireg(g, addr);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·raceacquireg(G *gp, void *addr)
-{
- if(g->raceignore || !isvalidaddr((uintptr)addr))
- return;
- runtime·racecall(__tsan_acquire, gp->racectx, addr);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racerelease(void *addr)
-{
- if(g->raceignore || !isvalidaddr((uintptr)addr))
- return;
- runtime·racereleaseg(g, addr);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racereleaseg(G *gp, void *addr)
-{
- if(g->raceignore || !isvalidaddr((uintptr)addr))
- return;
- runtime·racecall(__tsan_release, gp->racectx, addr);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racereleasemerge(void *addr)
-{
- runtime·racereleasemergeg(g, addr);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racereleasemergeg(G *gp, void *addr)
-{
- if(g->raceignore || !isvalidaddr((uintptr)addr))
- return;
- runtime·racecall(__tsan_release_merge, gp->racectx, addr);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·racefingo(void)
-{
- runtime·racecall(__tsan_finalizer_goroutine, g->racectx);
-}
-
-// func RaceAcquire(addr unsafe.Pointer)
-#pragma textflag NOSPLIT
-void
-runtime·RaceAcquire(void *addr)
-{
- runtime·raceacquire(addr);
-}
-
-// func RaceRelease(addr unsafe.Pointer)
-#pragma textflag NOSPLIT
-void
-runtime·RaceRelease(void *addr)
-{
- runtime·racerelease(addr);
-}
-
-// func RaceReleaseMerge(addr unsafe.Pointer)
-#pragma textflag NOSPLIT
-void
-runtime·RaceReleaseMerge(void *addr)
-{
- runtime·racereleasemerge(addr);
-}
-
-// func RaceDisable()
-#pragma textflag NOSPLIT
-void
-runtime·RaceDisable(void)
-{
- if(g->raceignore++ == 0)
- runtime·racecall(__tsan_go_ignore_sync_begin, g->racectx);
-}
-
-// func RaceEnable()
-#pragma textflag NOSPLIT
-void
-runtime·RaceEnable(void)
-{
- if(--g->raceignore == 0)
- runtime·racecall(__tsan_go_ignore_sync_end, g->racectx);
-}
diff --git a/src/runtime/race.go b/src/runtime/race.go
index bb0ee6df6..649cd7295 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -12,18 +12,6 @@ import (
"unsafe"
)
-func racefini()
-
-// RaceDisable disables handling of race events in the current goroutine.
-func RaceDisable()
-
-// RaceEnable re-enables handling of race events in the current goroutine.
-func RaceEnable()
-
-func RaceAcquire(addr unsafe.Pointer)
-func RaceRelease(addr unsafe.Pointer)
-func RaceReleaseMerge(addr unsafe.Pointer)
-
func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
@@ -67,32 +55,6 @@ func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)
//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
-//go:noescape
-func racereadrangepc(addr unsafe.Pointer, len uintptr, callpc, pc uintptr)
-
-//go:noescape
-func racewriterangepc(addr unsafe.Pointer, len uintptr, callpc, pc uintptr)
-
-//go:noescape
-func raceacquire(addr unsafe.Pointer)
-
-//go:noescape
-func racerelease(addr unsafe.Pointer)
-
-//go:noescape
-func raceacquireg(gp *g, addr unsafe.Pointer)
-
-//go:noescape
-func racereleaseg(gp *g, addr unsafe.Pointer)
-
-func racefingo()
-
-//go:noescape
-func racemalloc(p unsafe.Pointer, size uintptr)
-
-//go:noescape
-func racereleasemerge(addr unsafe.Pointer)
-
type symbolizeContext struct {
pc uintptr
fn *byte
@@ -118,8 +80,8 @@ func racesymbolize(ctx *symbolizeContext) {
}
ctx.fn = funcname(f)
- var file string
- ctx.line = uintptr(funcline(f, ctx.pc, &file))
+ file, line := funcline(f, ctx.pc)
+ ctx.line = uintptr(line)
ctx.file = &bytes(file)[0] // assume NUL-terminated
ctx.off = ctx.pc - f.entry
ctx.res = 1
diff --git a/src/runtime/race.h b/src/runtime/race.h
deleted file mode 100644
index fee31e09f..000000000
--- a/src/runtime/race.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Definitions related to data race detection.
-
-#ifdef RACE
-enum { raceenabled = 1 };
-#else
-enum { raceenabled = 0 };
-#endif
-
-// Initialize race detection subsystem.
-uintptr runtime·raceinit(void);
-// Finalize race detection subsystem, does not return.
-void runtime·racefini(void);
-
-void runtime·racemapshadow(void *addr, uintptr size);
-void runtime·racemalloc(void *p, uintptr sz);
-uintptr runtime·racegostart(void *pc);
-void runtime·racegoend(void);
-void runtime·racewritepc(void *addr, void *callpc, void *pc);
-void runtime·racereadpc(void *addr, void *callpc, void *pc);
-void runtime·racewriterangepc(void *addr, uintptr sz, void *callpc, void *pc);
-void runtime·racereadrangepc(void *addr, uintptr sz, void *callpc, void *pc);
-void runtime·racereadobjectpc(void *addr, Type *t, void *callpc, void *pc);
-void runtime·racewriteobjectpc(void *addr, Type *t, void *callpc, void *pc);
-void runtime·racefingo(void);
-void runtime·raceacquire(void *addr);
-void runtime·raceacquireg(G *gp, void *addr);
-void runtime·racerelease(void *addr);
-void runtime·racereleaseg(G *gp, void *addr);
-void runtime·racereleasemerge(void *addr);
-void runtime·racereleasemergeg(G *gp, void *addr);
diff --git a/src/runtime/race0.go b/src/runtime/race0.go
index 5d90cc859..dadb6083f 100644
--- a/src/runtime/race0.go
+++ b/src/runtime/race0.go
@@ -18,7 +18,7 @@ const raceenabled = false
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceinit() { gothrow("race") }
+func raceinit() uintptr { gothrow("race"); return 0 }
func racefini() { gothrow("race") }
func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") }
func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
diff --git a/src/runtime/race1.go b/src/runtime/race1.go
new file mode 100644
index 000000000..4c580429c
--- /dev/null
+++ b/src/runtime/race1.go
@@ -0,0 +1,304 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of the race detector API.
+// +build race
+
+package runtime
+
+import "unsafe"
+
+// Race runtime functions called via runtime·racecall.
+//go:linkname __tsan_init __tsan_init
+var __tsan_init byte
+
+//go:linkname __tsan_fini __tsan_fini
+var __tsan_fini byte
+
+//go:linkname __tsan_map_shadow __tsan_map_shadow
+var __tsan_map_shadow byte
+
+//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
+var __tsan_finalizer_goroutine byte
+
+//go:linkname __tsan_go_start __tsan_go_start
+var __tsan_go_start byte
+
+//go:linkname __tsan_go_end __tsan_go_end
+var __tsan_go_end byte
+
+//go:linkname __tsan_malloc __tsan_malloc
+var __tsan_malloc byte
+
+//go:linkname __tsan_acquire __tsan_acquire
+var __tsan_acquire byte
+
+//go:linkname __tsan_release __tsan_release
+var __tsan_release byte
+
+//go:linkname __tsan_release_merge __tsan_release_merge
+var __tsan_release_merge byte
+
+//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
+var __tsan_go_ignore_sync_begin byte
+
+//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
+var __tsan_go_ignore_sync_end byte
+
+// Mimic what cmd/cgo would do.
+//go:cgo_import_static __tsan_init
+//go:cgo_import_static __tsan_fini
+//go:cgo_import_static __tsan_map_shadow
+//go:cgo_import_static __tsan_finalizer_goroutine
+//go:cgo_import_static __tsan_go_start
+//go:cgo_import_static __tsan_go_end
+//go:cgo_import_static __tsan_malloc
+//go:cgo_import_static __tsan_acquire
+//go:cgo_import_static __tsan_release
+//go:cgo_import_static __tsan_release_merge
+//go:cgo_import_static __tsan_go_ignore_sync_begin
+//go:cgo_import_static __tsan_go_ignore_sync_end
+
+// These are called from race_amd64.s.
+//go:cgo_import_static __tsan_read
+//go:cgo_import_static __tsan_read_pc
+//go:cgo_import_static __tsan_read_range
+//go:cgo_import_static __tsan_write
+//go:cgo_import_static __tsan_write_pc
+//go:cgo_import_static __tsan_write_range
+//go:cgo_import_static __tsan_func_enter
+//go:cgo_import_static __tsan_func_exit
+
+//go:cgo_import_static __tsan_go_atomic32_load
+//go:cgo_import_static __tsan_go_atomic64_load
+//go:cgo_import_static __tsan_go_atomic32_store
+//go:cgo_import_static __tsan_go_atomic64_store
+//go:cgo_import_static __tsan_go_atomic32_exchange
+//go:cgo_import_static __tsan_go_atomic64_exchange
+//go:cgo_import_static __tsan_go_atomic32_fetch_add
+//go:cgo_import_static __tsan_go_atomic64_fetch_add
+//go:cgo_import_static __tsan_go_atomic32_compare_exchange
+//go:cgo_import_static __tsan_go_atomic64_compare_exchange
+
+// start/end of heap for race_amd64.s
+var racearenastart uintptr
+var racearenaend uintptr
+
+func racefuncenter(uintptr)
+func racefuncexit()
+func racereadrangepc1(uintptr, uintptr, uintptr)
+func racewriterangepc1(uintptr, uintptr, uintptr)
+func racesymbolizethunk(uintptr)
+
+// racecall allows calling an arbitrary function f from the C race runtime
+// with up to 4 uintptr arguments.
+func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
+
+// isvalidaddr reports whether addr has race shadow memory (i.e. it points into the heap or the data/bss segments).
+//go:nosplit
+func isvalidaddr(addr unsafe.Pointer) bool {
+ return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
+ uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(addr) && uintptr(addr) < uintptr(unsafe.Pointer(&enoptrbss))
+}
+
+//go:nosplit
+func raceinit() uintptr {
+ // cgo is required to initialize libc, which is used by the race runtime
+ if !iscgo {
+ gothrow("raceinit: race build must use cgo")
+ }
+
+ var racectx uintptr
+ racecall(&__tsan_init, uintptr(unsafe.Pointer(&racectx)), funcPC(racesymbolizethunk), 0, 0)
+
+ // Round data segment to page boundaries, because it's used in mmap().
+ start := uintptr(unsafe.Pointer(&noptrdata)) &^ (_PageSize - 1)
+ size := round(uintptr(unsafe.Pointer(&enoptrbss))-start, _PageSize)
+ racecall(&__tsan_map_shadow, start, size, 0, 0)
+
+ return racectx
+}
+
+//go:nosplit
+func racefini() {
+ racecall(&__tsan_fini, 0, 0, 0, 0)
+}
+
+//go:nosplit
+func racemapshadow(addr unsafe.Pointer, size uintptr) {
+ if racearenastart == 0 {
+ racearenastart = uintptr(addr)
+ }
+ if racearenaend < uintptr(addr)+size {
+ racearenaend = uintptr(addr) + size
+ }
+ racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
+}
+
+//go:nosplit
+func racemalloc(p unsafe.Pointer, sz uintptr) {
+ racecall(&__tsan_malloc, uintptr(p), sz, 0, 0)
+}
+
+//go:nosplit
+func racegostart(pc uintptr) uintptr {
+ _g_ := getg()
+ var spawng *g
+ if _g_.m.curg != nil {
+ spawng = _g_.m.curg
+ } else {
+ spawng = _g_
+ }
+
+ var racectx uintptr
+ racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
+ return racectx
+}
+
+//go:nosplit
+func racegoend() {
+ racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
+}
+
+//go:nosplit
+func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
+ _g_ := getg()
+ if _g_ != _g_.m.curg {
+ // The call is coming from manual instrumentation of Go code running on g0/gsignal.
+ // Not interesting.
+ return
+ }
+ if callpc != 0 {
+ racefuncenter(callpc)
+ }
+ racewriterangepc1(uintptr(addr), sz, pc)
+ if callpc != 0 {
+ racefuncexit()
+ }
+}
+
+//go:nosplit
+func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
+ _g_ := getg()
+ if _g_ != _g_.m.curg {
+ // The call is coming from manual instrumentation of Go code running on g0/gsignal.
+ // Not interesting.
+ return
+ }
+ if callpc != 0 {
+ racefuncenter(callpc)
+ }
+ racereadrangepc1(uintptr(addr), sz, pc)
+ if callpc != 0 {
+ racefuncexit()
+ }
+}
+
+//go:nosplit
+func racewriteobjectpc(addr unsafe.Pointer, t *_type, callpc, pc uintptr) {
+ kind := t.kind & _KindMask
+ if kind == _KindArray || kind == _KindStruct {
+ racewriterangepc(addr, t.size, callpc, pc)
+ } else {
+ racewritepc(addr, callpc, pc)
+ }
+}
+
+//go:nosplit
+func racereadobjectpc(addr unsafe.Pointer, t *_type, callpc, pc uintptr) {
+ kind := t.kind & _KindMask
+ if kind == _KindArray || kind == _KindStruct {
+ racereadrangepc(addr, t.size, callpc, pc)
+ } else {
+ racereadpc(addr, callpc, pc)
+ }
+}
+
+//go:nosplit
+func raceacquire(addr unsafe.Pointer) {
+ raceacquireg(getg(), addr)
+}
+
+//go:nosplit
+func raceacquireg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racerelease(addr unsafe.Pointer) {
+ _g_ := getg()
+ if _g_.raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racereleaseg(_g_, addr)
+}
+
+//go:nosplit
+func racereleaseg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racereleasemerge(addr unsafe.Pointer) {
+ racereleasemergeg(getg(), addr)
+}
+
+//go:nosplit
+func racereleasemergeg(gp *g, addr unsafe.Pointer) {
+ if getg().raceignore != 0 || !isvalidaddr(addr) {
+ return
+ }
+ racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
+}
+
+//go:nosplit
+func racefingo() {
+ racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
+}
+
+//go:nosplit
+
+func RaceAcquire(addr unsafe.Pointer) {
+ raceacquire(addr)
+}
+
+//go:nosplit
+
+func RaceRelease(addr unsafe.Pointer) {
+ racerelease(addr)
+}
+
+//go:nosplit
+
+func RaceReleaseMerge(addr unsafe.Pointer) {
+ racereleasemerge(addr)
+}
+
+//go:nosplit
+
+// RaceDisable disables handling of race events in the current goroutine.
+func RaceDisable() {
+ _g_ := getg()
+ if _g_.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
+ }
+ _g_.raceignore++
+}
+
+//go:nosplit
+
+// RaceEnable re-enables handling of race events in the current goroutine.
+func RaceEnable() {
+ _g_ := getg()
+ _g_.raceignore--
+ if _g_.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
+ }
+}
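
RaceDisable and RaceEnable above nest via the per-goroutine raceignore counter: only the outermost RaceDisable asks tsan to start ignoring synchronization events, and only the matching outermost RaceEnable turns reporting back on. A minimal usage sketch, assuming the program is built with -race (these exported functions are typically only compiled in under the race build tag); racyCounter and its writer goroutine are made-up examples:

	package main

	import (
		"runtime"
		"time"
	)

	// racyCounter is a made-up example: it is written without synchronization
	// by the goroutine started in main, purely to give the race detector
	// something to ignore.
	var racyCounter int

	func main() {
		go func() {
			for {
				racyCounter++ // intentionally racy writer
			}
		}()
		time.Sleep(10 * time.Millisecond)

		// Suppress reports for this deliberately unsynchronized read. The
		// calls nest: a second RaceDisable here would need a matching second
		// RaceEnable before reporting resumes.
		runtime.RaceDisable()
		_ = racyCounter
		runtime.RaceEnable()
	}

Because raceignore lives on the g, the disable/enable pair affects only the calling goroutine, matching the doc comments above.
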
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 15b18ff8f..a7f44870a 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -4,7 +4,8 @@
// +build race
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
diff --git a/src/runtime/rdebug.go b/src/runtime/rdebug.go
index e5e691122..f2766d793 100644
--- a/src/runtime/rdebug.go
+++ b/src/runtime/rdebug.go
@@ -10,15 +10,6 @@ func setMaxStack(in int) (out int) {
return out
}
-func setGCPercent(in int32) (out int32) {
- mp := acquirem()
- mp.scalararg[0] = uintptr(int(in))
- onM(setgcpercent_m)
- out = int32(int(mp.scalararg[0]))
- releasem(mp)
- return out
-}
-
func setPanicOnFault(new bool) (old bool) {
mp := acquirem()
old = mp.curg.paniconfault
@@ -26,12 +17,3 @@ func setPanicOnFault(new bool) (old bool) {
releasem(mp)
return old
}
-
-func setMaxThreads(in int) (out int) {
- mp := acquirem()
- mp.scalararg[0] = uintptr(in)
- onM(setmaxthreads_m)
- out = int(mp.scalararg[0])
- releasem(mp)
- return out
-}
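
The rdebug.go hunk drops the onM/scalararg shims for setGCPercent and setMaxThreads; the exported knobs in runtime/debug remain the stable entry points, presumably now backed by pure-Go implementations elsewhere in this merge. A small usage sketch of that public API:

	package main

	import (
		"fmt"
		"runtime/debug"
	)

	func main() {
		// SetGCPercent returns the previous setting; a negative value
		// disables the collector until the old value is restored.
		old := debug.SetGCPercent(-1)
		defer debug.SetGCPercent(old)
		fmt.Println("previous GOGC percentage:", old)

		// SetMaxThreads also returns the previous limit.
		prev := debug.SetMaxThreads(10000)
		fmt.Println("previous thread limit:", prev)
	}
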
diff --git a/src/runtime/rt0_linux_386.s b/src/runtime/rt0_linux_386.s
index 352e594d5..47fd908e7 100644
--- a/src/runtime/rt0_linux_386.s
+++ b/src/runtime/rt0_linux_386.s
@@ -9,7 +9,6 @@ TEXT _rt0_386_linux(SB),NOSPLIT,$8
LEAL 12(SP), BX
MOVL AX, 0(SP)
MOVL BX, 4(SP)
- CALL runtime·linux_setup_vdso(SB)
CALL main(SB)
INT $3
diff --git a/src/runtime/rt0_windows_amd64.s b/src/runtime/rt0_windows_amd64.s
index 197f52e11..df956ba36 100644
--- a/src/runtime/rt0_windows_amd64.s
+++ b/src/runtime/rt0_windows_amd64.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT _rt0_amd64_windows(SB),NOSPLIT,$-8
diff --git a/src/runtime/runtime.c b/src/runtime/runtime.c
deleted file mode 100644
index a68414284..000000000
--- a/src/runtime/runtime.c
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "stack.h"
-#include "arch_GOARCH.h"
-#include "textflag.h"
-#include "malloc.h"
-
-// Keep a cached value to make gotraceback fast,
-// since we call it on every call to gentraceback.
-// The cached value is a uint32 in which the low bit
-// is the "crash" setting and the top 31 bits are the
-// gotraceback value.
-static uint32 traceback_cache = 2<<1;
-
-// The GOTRACEBACK environment variable controls the
-// behavior of a Go program that is crashing and exiting.
-// GOTRACEBACK=0 suppress all tracebacks
-// GOTRACEBACK=1 default behavior - show tracebacks but exclude runtime frames
-// GOTRACEBACK=2 show tracebacks including runtime frames
-// GOTRACEBACK=crash show tracebacks including runtime frames, then crash (core dump etc)
-#pragma textflag NOSPLIT
-int32
-runtime·gotraceback(bool *crash)
-{
- if(crash != nil)
- *crash = false;
- if(g->m->traceback != 0)
- return g->m->traceback;
- if(crash != nil)
- *crash = traceback_cache&1;
- return traceback_cache>>1;
-}
-
-int32
-runtime·mcmp(byte *s1, byte *s2, uintptr n)
-{
- uintptr i;
- byte c1, c2;
-
- for(i=0; i<n; i++) {
- c1 = s1[i];
- c2 = s2[i];
- if(c1 < c2)
- return -1;
- if(c1 > c2)
- return +1;
- }
- return 0;
-}
-
-
-byte*
-runtime·mchr(byte *p, byte c, byte *ep)
-{
- for(; p < ep; p++)
- if(*p == c)
- return p;
- return nil;
-}
-
-static int32 argc;
-
-#pragma dataflag NOPTR /* argv not a heap pointer */
-static uint8** argv;
-
-extern Slice runtime·argslice;
-extern Slice runtime·envs;
-
-void (*runtime·sysargs)(int32, uint8**);
-
-void
-runtime·args(int32 c, uint8 **v)
-{
- argc = c;
- argv = v;
- if(runtime·sysargs != nil)
- runtime·sysargs(c, v);
-}
-
-int32 runtime·isplan9;
-int32 runtime·issolaris;
-int32 runtime·iswindows;
-
-// Information about what cpu features are available.
-// Set on startup in asm_{x86/amd64}.s.
-uint32 runtime·cpuid_ecx;
-uint32 runtime·cpuid_edx;
-
-void
-runtime·goargs(void)
-{
- String *s;
- int32 i;
-
- // for windows implementation see "os" package
- if(Windows)
- return;
-
- runtime·argslice = runtime·makeStringSlice(argc);
- s = (String*)runtime·argslice.array;
- for(i=0; i<argc; i++)
- s[i] = runtime·gostringnocopy(argv[i]);
-}
-
-void
-runtime·goenvs_unix(void)
-{
- String *s;
- int32 i, n;
-
- for(n=0; argv[argc+1+n] != 0; n++)
- ;
-
- runtime·envs = runtime·makeStringSlice(n);
- s = (String*)runtime·envs.array;
- for(i=0; i<n; i++)
- s[i] = runtime·gostringnocopy(argv[argc+1+i]);
-}
-
-#pragma textflag NOSPLIT
-Slice
-runtime·environ()
-{
- return runtime·envs;
-}
-
-int32
-runtime·atoi(byte *p)
-{
- int32 n;
-
- n = 0;
- while('0' <= *p && *p <= '9')
- n = n*10 + *p++ - '0';
- return n;
-}
-
-static void
-TestAtomic64(void)
-{
- uint64 z64, x64;
-
- z64 = 42;
- x64 = 0;
- PREFETCH(&z64);
- if(runtime·cas64(&z64, x64, 1))
- runtime·throw("cas64 failed");
- if(x64 != 0)
- runtime·throw("cas64 failed");
- x64 = 42;
- if(!runtime·cas64(&z64, x64, 1))
- runtime·throw("cas64 failed");
- if(x64 != 42 || z64 != 1)
- runtime·throw("cas64 failed");
- if(runtime·atomicload64(&z64) != 1)
- runtime·throw("load64 failed");
- runtime·atomicstore64(&z64, (1ull<<40)+1);
- if(runtime·atomicload64(&z64) != (1ull<<40)+1)
- runtime·throw("store64 failed");
- if(runtime·xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
- runtime·throw("xadd64 failed");
- if(runtime·atomicload64(&z64) != (2ull<<40)+2)
- runtime·throw("xadd64 failed");
- if(runtime·xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
- runtime·throw("xchg64 failed");
- if(runtime·atomicload64(&z64) != (3ull<<40)+3)
- runtime·throw("xchg64 failed");
-}
-
-void
-runtime·check(void)
-{
- int8 a;
- uint8 b;
- int16 c;
- uint16 d;
- int32 e;
- uint32 f;
- int64 g;
- uint64 h;
- float32 i, i1;
- float64 j, j1;
- byte *k, *k1;
- uint16* l;
- byte m[4];
- struct x1 {
- byte x;
- };
- struct y1 {
- struct x1 x1;
- byte y;
- };
-
- if(sizeof(a) != 1) runtime·throw("bad a");
- if(sizeof(b) != 1) runtime·throw("bad b");
- if(sizeof(c) != 2) runtime·throw("bad c");
- if(sizeof(d) != 2) runtime·throw("bad d");
- if(sizeof(e) != 4) runtime·throw("bad e");
- if(sizeof(f) != 4) runtime·throw("bad f");
- if(sizeof(g) != 8) runtime·throw("bad g");
- if(sizeof(h) != 8) runtime·throw("bad h");
- if(sizeof(i) != 4) runtime·throw("bad i");
- if(sizeof(j) != 8) runtime·throw("bad j");
- if(sizeof(k) != sizeof(uintptr)) runtime·throw("bad k");
- if(sizeof(l) != sizeof(uintptr)) runtime·throw("bad l");
- if(sizeof(struct x1) != 1) runtime·throw("bad sizeof x1");
- if(offsetof(struct y1, y) != 1) runtime·throw("bad offsetof y1.y");
- if(sizeof(struct y1) != 2) runtime·throw("bad sizeof y1");
-
- if(runtime·timediv(12345LL*1000000000+54321, 1000000000, &e) != 12345 || e != 54321)
- runtime·throw("bad timediv");
-
- uint32 z;
- z = 1;
- if(!runtime·cas(&z, 1, 2))
- runtime·throw("cas1");
- if(z != 2)
- runtime·throw("cas2");
-
- z = 4;
- if(runtime·cas(&z, 5, 6))
- runtime·throw("cas3");
- if(z != 4)
- runtime·throw("cas4");
-
- z = 0xffffffff;
- if(!runtime·cas(&z, 0xffffffff, 0xfffffffe))
- runtime·throw("cas5");
- if(z != 0xfffffffe)
- runtime·throw("cas6");
-
- k = (byte*)0xfedcb123;
- if(sizeof(void*) == 8)
- k = (byte*)((uintptr)k<<10);
- if(runtime·casp((void**)&k, nil, nil))
- runtime·throw("casp1");
- k1 = k+1;
- if(!runtime·casp((void**)&k, k, k1))
- runtime·throw("casp2");
- if(k != k1)
- runtime·throw("casp3");
-
- m[0] = m[1] = m[2] = m[3] = 0x1;
- runtime·atomicor8(&m[1], 0xf0);
- if (m[0] != 0x1 || m[1] != 0xf1 || m[2] != 0x1 || m[3] != 0x1)
- runtime·throw("atomicor8");
-
- *(uint64*)&j = ~0ULL;
- if(j == j)
- runtime·throw("float64nan");
- if(!(j != j))
- runtime·throw("float64nan1");
-
- *(uint64*)&j1 = ~1ULL;
- if(j == j1)
- runtime·throw("float64nan2");
- if(!(j != j1))
- runtime·throw("float64nan3");
-
- *(uint32*)&i = ~0UL;
- if(i == i)
- runtime·throw("float32nan");
- if(!(i != i))
- runtime·throw("float32nan1");
-
- *(uint32*)&i1 = ~1UL;
- if(i == i1)
- runtime·throw("float32nan2");
- if(!(i != i1))
- runtime·throw("float32nan3");
-
- TestAtomic64();
-
- if(FixedStack != runtime·round2(FixedStack))
- runtime·throw("FixedStack is not power-of-2");
-}
-
-#pragma dataflag NOPTR
-DebugVars runtime·debug;
-
-typedef struct DbgVar DbgVar;
-struct DbgVar
-{
- int8* name;
- int32* value;
-};
-
-// Do we report invalid pointers found during stack or heap scans?
-int32 runtime·invalidptr = 1;
-
-#pragma dataflag NOPTR /* dbgvar has no heap pointers */
-static DbgVar dbgvar[] = {
- {"allocfreetrace", &runtime·debug.allocfreetrace},
- {"invalidptr", &runtime·invalidptr},
- {"efence", &runtime·debug.efence},
- {"gctrace", &runtime·debug.gctrace},
- {"gcdead", &runtime·debug.gcdead},
- {"scheddetail", &runtime·debug.scheddetail},
- {"schedtrace", &runtime·debug.schedtrace},
- {"scavenge", &runtime·debug.scavenge},
-};
-
-void
-runtime·parsedebugvars(void)
-{
- byte *p;
- intgo i, n;
-
- p = runtime·getenv("GODEBUG");
- if(p != nil){
- for(;;) {
- for(i=0; i<nelem(dbgvar); i++) {
- n = runtime·findnull((byte*)dbgvar[i].name);
- if(runtime·mcmp(p, (byte*)dbgvar[i].name, n) == 0 && p[n] == '=')
- *dbgvar[i].value = runtime·atoi(p+n+1);
- }
- p = runtime·strstr(p, (byte*)",");
- if(p == nil)
- break;
- p++;
- }
- }
-
- p = runtime·getenv("GOTRACEBACK");
- if(p == nil)
- p = (byte*)"";
- if(p[0] == '\0')
- traceback_cache = 1<<1;
- else if(runtime·strcmp(p, (byte*)"crash") == 0)
- traceback_cache = (2<<1) | 1;
- else
- traceback_cache = runtime·atoi(p)<<1;
-}
-
-// Poor mans 64-bit division.
-// This is a very special function, do not use it if you are not sure what you are doing.
-// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
-// Handles overflow in a time-specific manner.
-#pragma textflag NOSPLIT
-int32
-runtime·timediv(int64 v, int32 div, int32 *rem)
-{
- int32 res, bit;
-
- res = 0;
- for(bit = 30; bit >= 0; bit--) {
- if(v >= ((int64)div<<bit)) {
- v = v - ((int64)div<<bit);
- res += 1<<bit;
- }
- }
- if(v >= (int64)div) {
- if(rem != nil)
- *rem = 0;
- return 0x7fffffff;
- }
- if(rem != nil)
- *rem = v;
- return res;
-}
-
-// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
-
-#pragma textflag NOSPLIT
-G*
-runtime·getg(void)
-{
- return g;
-}
-
-#pragma textflag NOSPLIT
-M*
-runtime·acquirem(void)
-{
- g->m->locks++;
- return g->m;
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·releasem(M *mp)
-{
- mp->locks--;
- if(mp->locks == 0 && g->preempt) {
- // restore the preemption request in case we've cleared it in newstack
- g->stackguard0 = StackPreempt;
- }
-}
-
-#pragma textflag NOSPLIT
-MCache*
-runtime·gomcache(void)
-{
- return g->m->mcache;
-}
-
-#pragma textflag NOSPLIT
-Slice
-reflect·typelinks(void)
-{
- extern Type *runtime·typelink[], *runtime·etypelink[];
- Slice ret;
-
- ret.array = (byte*)runtime·typelink;
- ret.len = runtime·etypelink - runtime·typelink;
- ret.cap = ret.len;
- return ret;
-}
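
Among the deleted C above, runtime·timediv has the one non-obvious algorithm: a shift-and-subtract division that avoids the 64-bit divide lowering (_divv on 386), so it stays usable in nosplit code, and that saturates on overflow. A Go transliteration for illustration only, on the assumption that a Go equivalent replaces it elsewhere in this merge:

	package main

	import "fmt"

	// timediv is a Go transliteration of the deleted C helper: it divides a
	// 64-bit value by a 32-bit divisor using only shifts and subtractions (no
	// 64-bit divide instruction or _divv call) and saturates at 0x7fffffff
	// when the quotient does not fit in 31 bits.
	func timediv(v int64, div int32, rem *int32) int32 {
		res := int32(0)
		for bit := 30; bit >= 0; bit-- {
			if v >= int64(div)<<uint(bit) {
				v -= int64(div) << uint(bit)
				res += 1 << uint(bit)
			}
		}
		if v >= int64(div) {
			if rem != nil {
				*rem = 0
			}
			return 0x7fffffff
		}
		if rem != nil {
			*rem = int32(v)
		}
		return res
	}

	func main() {
		var rem int32
		fmt.Println(timediv(12345*1000000000+54321, 1000000000, &rem), rem) // 12345 54321
	}
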
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
deleted file mode 100644
index 330ed429b..000000000
--- a/src/runtime/runtime.h
+++ /dev/null
@@ -1,1151 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
- * basic types
- */
-typedef signed char int8;
-typedef unsigned char uint8;
-typedef signed short int16;
-typedef unsigned short uint16;
-typedef signed int int32;
-typedef unsigned int uint32;
-typedef signed long long int int64;
-typedef unsigned long long int uint64;
-typedef float float32;
-typedef double float64;
-
-#ifdef _64BIT
-typedef uint64 uintptr;
-typedef int64 intptr;
-typedef int64 intgo; // Go's int
-typedef uint64 uintgo; // Go's uint
-#else
-typedef uint32 uintptr;
-typedef int32 intptr;
-typedef int32 intgo; // Go's int
-typedef uint32 uintgo; // Go's uint
-#endif
-
-#ifdef _64BITREG
-typedef uint64 uintreg;
-#else
-typedef uint32 uintreg;
-#endif
-
-/*
- * get rid of C types
- * the / / / forces a syntax error immediately,
- * which will show "last name: XXunsigned".
- */
-#define unsigned XXunsigned / / /
-#define signed XXsigned / / /
-#define char XXchar / / /
-#define short XXshort / / /
-#define int XXint / / /
-#define long XXlong / / /
-#define float XXfloat / / /
-#define double XXdouble / / /
-
-/*
- * defined types
- */
-typedef uint8 bool;
-typedef uint8 byte;
-typedef struct Func Func;
-typedef struct G G;
-typedef struct Gobuf Gobuf;
-typedef struct SudoG SudoG;
-typedef struct Mutex Mutex;
-typedef struct M M;
-typedef struct P P;
-typedef struct SchedT SchedT;
-typedef struct Note Note;
-typedef struct Slice Slice;
-typedef struct String String;
-typedef struct FuncVal FuncVal;
-typedef struct SigTab SigTab;
-typedef struct MCache MCache;
-typedef struct FixAlloc FixAlloc;
-typedef struct Iface Iface;
-typedef struct Itab Itab;
-typedef struct InterfaceType InterfaceType;
-typedef struct Eface Eface;
-typedef struct Type Type;
-typedef struct PtrType PtrType;
-typedef struct ChanType ChanType;
-typedef struct MapType MapType;
-typedef struct Defer Defer;
-typedef struct Panic Panic;
-typedef struct Hmap Hmap;
-typedef struct Hiter Hiter;
-typedef struct Hchan Hchan;
-typedef struct Complex64 Complex64;
-typedef struct Complex128 Complex128;
-typedef struct LibCall LibCall;
-typedef struct WinCallbackContext WinCallbackContext;
-typedef struct GCStats GCStats;
-typedef struct LFNode LFNode;
-typedef struct ParFor ParFor;
-typedef struct ParForThread ParForThread;
-typedef struct CgoMal CgoMal;
-typedef struct PollDesc PollDesc;
-typedef struct DebugVars DebugVars;
-typedef struct ForceGCState ForceGCState;
-typedef struct Stack Stack;
-typedef struct Workbuf Workbuf;
-
-/*
- * Per-CPU declaration.
- *
- * "extern register" is a special storage class implemented by 6c, 8c, etc.
- * On the ARM, it is an actual register; elsewhere it is a slot in thread-
- * local storage indexed by a pseudo-register TLS. See zasmhdr in
- * src/cmd/dist/buildruntime.c for details, and be aware that the linker may
- * make further OS-specific changes to the compiler's output. For example,
- * 6l/linux rewrites 0(TLS) as -8(FS).
- *
- * Every C file linked into a Go program must include runtime.h so that the
- * C compiler (6c, 8c, etc.) knows to avoid other uses of these dedicated
- * registers. The Go compiler (6g, 8g, etc.) knows to avoid them.
- */
-extern register G* g;
-
-/*
- * defined constants
- */
-enum
-{
- // G status
- //
- // If you add to this list, add to the list
- // of "okay during garbage collection" status
- // in mgc0.c too.
- Gidle, // 0
- Grunnable, // 1 runnable and on a run queue
- Grunning, // 2
- Gsyscall, // 3
- Gwaiting, // 4
- Gmoribund_unused, // 5 currently unused, but hardcoded in gdb scripts
- Gdead, // 6
- Genqueue, // 7 Only the Gscanenqueue is used.
- Gcopystack, // 8 in this state when newstack is moving the stack
- // the following encode that the GC is scanning the stack and what to do when it is done
- Gscan = 0x1000, // atomicstatus&~Gscan = the non-scan state,
- // Gscanidle = Gscan + Gidle, // Not used. Gidle only used with newly malloced gs
- Gscanrunnable = Gscan + Grunnable, // 0x1001 When scanning complets make Grunnable (it is already on run queue)
- Gscanrunning = Gscan + Grunning, // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
- Gscansyscall = Gscan + Gsyscall, // 0x1003 When scanning completes make is Gsyscall
- Gscanwaiting = Gscan + Gwaiting, // 0x1004 When scanning completes make it Gwaiting
- // Gscanmoribund_unused, // not possible
- // Gscandead, // not possible
- Gscanenqueue = Gscan + Genqueue, // When scanning completes make it Grunnable and put on runqueue
-};
-enum
-{
- // P status
- Pidle,
- Prunning,
- Psyscall,
- Pgcstop,
- Pdead,
-};
-enum
-{
- true = 1,
- false = 0,
-};
-enum
-{
- PtrSize = sizeof(void*),
-};
-/*
- * structures
- */
-struct Mutex
-{
- // Futex-based impl treats it as uint32 key,
- // while sema-based impl as M* waitm.
- // Used to be a union, but unions break precise GC.
- uintptr key;
-};
-struct Note
-{
- // Futex-based impl treats it as uint32 key,
- // while sema-based impl as M* waitm.
- // Used to be a union, but unions break precise GC.
- uintptr key;
-};
-struct String
-{
- byte* str;
- intgo len;
-};
-struct FuncVal
-{
- void (*fn)(void);
- // variable-size, fn-specific data here
-};
-struct Iface
-{
- Itab* tab;
- void* data;
-};
-struct Eface
-{
- Type* type;
- void* data;
-};
-struct Complex64
-{
- float32 real;
- float32 imag;
-};
-struct Complex128
-{
- float64 real;
- float64 imag;
-};
-
-struct Slice
-{ // must not move anything
- byte* array; // actual data
- uintgo len; // number of elements
- uintgo cap; // allocated number of elements
-};
-struct Gobuf
-{
- // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
- uintptr sp;
- uintptr pc;
- G* g;
- void* ctxt; // this has to be a pointer so that GC scans it
- uintreg ret;
- uintptr lr;
-};
-// Known to compiler.
-// Changes here must also be made in src/cmd/gc/select.c's selecttype.
-struct SudoG
-{
- G* g;
- uint32* selectdone;
- SudoG* next;
- SudoG* prev;
- void* elem; // data element
- int64 releasetime;
- int32 nrelease; // -1 for acquire
- SudoG* waitlink; // G.waiting list
-};
-struct GCStats
-{
- // the struct must consist of only uint64's,
- // because it is casted to uint64[].
- uint64 nhandoff;
- uint64 nhandoffcnt;
- uint64 nprocyield;
- uint64 nosyield;
- uint64 nsleep;
-};
-
-struct LibCall
-{
- uintptr fn;
- uintptr n; // number of parameters
- uintptr args; // parameters
- uintptr r1; // return values
- uintptr r2;
- uintptr err; // error number
-};
-
-// describes how to handle callback
-struct WinCallbackContext
-{
- void* gobody; // Go function to call
- uintptr argsize; // callback arguments size (in bytes)
- uintptr restorestack; // adjust stack on return by (in bytes) (386 only)
- bool cleanstack;
-};
-
-// Stack describes a Go execution stack.
-// The bounds of the stack are exactly [lo, hi),
-// with no implicit data structures on either side.
-struct Stack
-{
- uintptr lo;
- uintptr hi;
-};
-
-struct G
-{
- // Stack parameters.
- // stack describes the actual stack memory: [stack.lo, stack.hi).
- // stackguard0 is the stack pointer compared in the Go stack growth prologue.
- // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
- // stackguard1 is the stack pointer compared in the C stack growth prologue.
- // It is stack.lo+StackGuard on g0 and gsignal stacks.
- // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
- Stack stack; // offset known to runtime/cgo
- uintptr stackguard0; // offset known to liblink
- uintptr stackguard1; // offset known to liblink
-
- Panic* panic; // innermost panic - offset known to liblink
- Defer* defer; // innermost defer
- Gobuf sched;
- uintptr syscallsp; // if status==Gsyscall, syscallsp = sched.sp to use during gc
- uintptr syscallpc; // if status==Gsyscall, syscallpc = sched.pc to use during gc
- void* param; // passed parameter on wakeup
- uint32 atomicstatus;
- int64 goid;
- int64 waitsince; // approx time when the G become blocked
- String waitreason; // if status==Gwaiting
- G* schedlink;
- bool issystem; // do not output in stack dump, ignore in deadlock detector
- bool preempt; // preemption signal, duplicates stackguard0 = StackPreempt
- bool paniconfault; // panic (instead of crash) on unexpected fault address
- bool preemptscan; // preempted g does scan for GC
- bool gcworkdone; // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle
- bool throwsplit; // must not split stack
- int8 raceignore; // ignore race detection events
- M* m; // for debuggers, but offset not hard-coded
- M* lockedm;
- int32 sig;
- Slice writebuf;
- uintptr sigcode0;
- uintptr sigcode1;
- uintptr sigpc;
- uintptr gopc; // pc of go statement that created this goroutine
- uintptr racectx;
- SudoG* waiting; // sudog structures this G is waiting on (that have a valid elem ptr)
- uintptr end[];
-};
-
-struct M
-{
- G* g0; // goroutine with scheduling stack
- Gobuf morebuf; // gobuf arg to morestack
-
- // Fields not known to debuggers.
- uint64 procid; // for debuggers, but offset not hard-coded
- G* gsignal; // signal-handling G
- uintptr tls[4]; // thread-local storage (for x86 extern register)
- void (*mstartfn)(void);
- G* curg; // current running goroutine
- G* caughtsig; // goroutine running during fatal signal
- P* p; // attached P for executing Go code (nil if not executing Go code)
- P* nextp;
- int32 id;
- int32 mallocing;
- int32 throwing;
- int32 gcing;
- int32 locks;
- int32 softfloat;
- int32 dying;
- int32 profilehz;
- int32 helpgc;
- bool spinning; // M is out of work and is actively looking for work
- bool blocked; // M is blocked on a Note
- bool inwb; // M is executing a write barrier
- int8 printlock;
- uint32 fastrand;
- uint64 ncgocall; // number of cgo calls in total
- int32 ncgo; // number of cgo calls currently in progress
- CgoMal* cgomal;
- Note park;
- M* alllink; // on allm
- M* schedlink;
- uint32 machport; // Return address for Mach IPC (OS X)
- MCache* mcache;
- G* lockedg;
- uintptr createstack[32];// Stack that created this thread.
- uint32 freglo[16]; // D[i] lsb and F[i]
- uint32 freghi[16]; // D[i] msb and F[i+16]
- uint32 fflag; // floating point compare flags
- uint32 locked; // tracking for LockOSThread
- M* nextwaitm; // next M waiting for lock
- uintptr waitsema; // semaphore for parking on locks
- uint32 waitsemacount;
- uint32 waitsemalock;
- GCStats gcstats;
- bool needextram;
- uint8 traceback;
- bool (*waitunlockf)(G*, void*);
- void* waitlock;
- uintptr scalararg[4]; // scalar argument/return for mcall
- void* ptrarg[4]; // pointer argument/return for mcall
-#ifdef GOOS_windows
- uintptr thread; // thread handle
- // these are here because they are too large to be on the stack
- // of low-level NOSPLIT functions.
- LibCall libcall;
- uintptr libcallpc; // for cpu profiler
- uintptr libcallsp;
- G* libcallg;
-#endif
-#ifdef GOOS_solaris
- int32* perrno; // pointer to TLS errno
- // these are here because they are too large to be on the stack
- // of low-level NOSPLIT functions.
- LibCall libcall;
- struct MTs {
- int64 tv_sec;
- int64 tv_nsec;
- } ts;
- struct MScratch {
- uintptr v[6];
- } scratch;
-#endif
-#ifdef GOOS_plan9
- int8* notesig;
- byte* errstr;
-#endif
- uintptr end[];
-};
-
-struct P
-{
- Mutex lock;
-
- int32 id;
- uint32 status; // one of Pidle/Prunning/...
- P* link;
- uint32 schedtick; // incremented on every scheduler call
- uint32 syscalltick; // incremented on every system call
- M* m; // back-link to associated M (nil if idle)
- MCache* mcache;
- Defer* deferpool[5]; // pool of available Defer structs of different sizes (see panic.c)
-
- // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
- uint64 goidcache;
- uint64 goidcacheend;
-
- // Queue of runnable goroutines.
- uint32 runqhead;
- uint32 runqtail;
- G* runq[256];
-
- // Available G's (status == Gdead)
- G* gfree;
- int32 gfreecnt;
-
- byte pad[64];
-};
-
-enum {
- // The max value of GOMAXPROCS.
- // There are no fundamental restrictions on the value.
- MaxGomaxprocs = 1<<8,
-};
-
-struct SchedT
-{
- Mutex lock;
-
- uint64 goidgen;
-
- M* midle; // idle m's waiting for work
- int32 nmidle; // number of idle m's waiting for work
- int32 nmidlelocked; // number of locked m's waiting for work
- int32 mcount; // number of m's that have been created
- int32 maxmcount; // maximum number of m's allowed (or die)
-
- P* pidle; // idle P's
- uint32 npidle;
- uint32 nmspinning;
-
- // Global runnable queue.
- G* runqhead;
- G* runqtail;
- int32 runqsize;
-
- // Global cache of dead G's.
- Mutex gflock;
- G* gfree;
- int32 ngfree;
-
- uint32 gcwaiting; // gc is waiting to run
- int32 stopwait;
- Note stopnote;
- uint32 sysmonwait;
- Note sysmonnote;
- uint64 lastpoll;
-
- int32 profilehz; // cpu profiling rate
-};
-
-// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
-// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
-// External locks are not recursive; a second lock is silently ignored.
-// The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
-// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
-// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
-// goroutine is holding the lock during the initialization phase.
-enum
-{
- LockExternal = 1,
- LockInternal = 2,
-};
-
-struct SigTab
-{
- int32 flags;
- int8 *name;
-};
-enum
-{
- SigNotify = 1<<0, // let signal.Notify have signal, even if from kernel
- SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
- SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
- SigPanic = 1<<3, // if the signal is from the kernel, panic
- SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
- SigHandling = 1<<5, // our signal handler is registered
- SigIgnored = 1<<6, // the signal was ignored before we registered for it
- SigGoExit = 1<<7, // cause all runtime procs to exit (only used on Plan 9).
-};
-
-// Layout of in-memory per-function information prepared by linker
-// See http://golang.org/s/go12symtab.
-// Keep in sync with linker and with ../../libmach/sym.c
-// and with package debug/gosym and with symtab.go in package runtime.
-struct Func
-{
- uintptr entry; // start pc
- int32 nameoff;// function name
-
- int32 args; // in/out args size
- int32 frame; // legacy frame size; use pcsp if possible
-
- int32 pcsp;
- int32 pcfile;
- int32 pcln;
- int32 npcdata;
- int32 nfuncdata;
-};
-
-// layout of Itab known to compilers
-// allocated in non-garbage-collected memory
-struct Itab
-{
- InterfaceType* inter;
- Type* type;
- Itab* link;
- int32 bad;
- int32 unused;
- void (*fun[])(void);
-};
-
-#ifdef GOOS_nacl
-enum {
- NaCl = 1,
-};
-#else
-enum {
- NaCl = 0,
-};
-#endif
-
-#ifdef GOOS_windows
-enum {
- Windows = 1
-};
-#else
-enum {
- Windows = 0
-};
-#endif
-#ifdef GOOS_solaris
-enum {
- Solaris = 1
-};
-#else
-enum {
- Solaris = 0
-};
-#endif
-#ifdef GOOS_plan9
-enum {
- Plan9 = 1
-};
-#else
-enum {
- Plan9 = 0
-};
-#endif
-
-// Lock-free stack node.
-// Also known to export_test.go.
-struct LFNode
-{
- uint64 next;
- uintptr pushcnt;
-};
-
-// Parallel for descriptor.
-struct ParFor
-{
- void (*body)(ParFor*, uint32); // executed for each element
- uint32 done; // number of idle threads
- uint32 nthr; // total number of threads
- uint32 nthrmax; // maximum number of threads
- uint32 thrseq; // thread id sequencer
- uint32 cnt; // iteration space [0, cnt)
- void *ctx; // arbitrary user context
- bool wait; // if true, wait while all threads finish processing,
- // otherwise parfor may return while other threads are still working
- ParForThread *thr; // array of thread descriptors
- uint32 pad; // to align ParForThread.pos for 64-bit atomic operations
- // stats
- uint64 nsteal;
- uint64 nstealcnt;
- uint64 nprocyield;
- uint64 nosyield;
- uint64 nsleep;
-};
-
-enum {
- WorkbufSize = 4*1024,
-};
-struct Workbuf
-{
- LFNode node; // must be first
- uintptr nobj;
- byte* obj[(WorkbufSize-sizeof(LFNode)-sizeof(uintptr))/PtrSize];
-};
-
-// Track memory allocated by code not written in Go during a cgo call,
-// so that the garbage collector can see them.
-struct CgoMal
-{
- CgoMal *next;
- void *alloc;
-};
-
-// Holds variables parsed from GODEBUG env var.
-struct DebugVars
-{
- int32 allocfreetrace;
- int32 efence;
- int32 gctrace;
- int32 gcdead;
- int32 scheddetail;
- int32 schedtrace;
- int32 scavenge;
-};
-
-// Indicates to write barrier and sychronization task to preform.
-enum
-{ // Action WB installation
- GCoff = 0, // stop and start no wb
- GCquiesce, // stop and start no wb
- GCstw, // stop the ps nop
- GCscan, // scan the stacks prior to marking
- GCmark, // mark use wbufs from GCscan and globals, scan the stacks, then go to GCtermination
- GCmarktermination, // mark termination detection. Allocate black, Ps help out GC
- GCsweep, // stop and start nop
-};
-
-struct ForceGCState
-{
- Mutex lock;
- G* g;
- uint32 idle;
-};
-
-extern uint32 runtime·gcphase;
-extern Mutex runtime·allglock;
-
-/*
- * defined macros
- * you need super-gopher-guru privilege
- * to add this list.
- */
-#define nelem(x) (sizeof(x)/sizeof((x)[0]))
-#define nil ((void*)0)
-#define offsetof(s,m) (uint32)(&(((s*)0)->m))
-#define ROUND(x, n) (((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
-
-/*
- * known to compiler
- */
-enum {
- Structrnd = sizeof(uintreg),
-};
-
-byte* runtime·startup_random_data;
-uint32 runtime·startup_random_data_len;
-
-int32 runtime·invalidptr;
-
-enum {
- // hashinit wants this many random bytes
- HashRandomBytes = 32
-};
-
-uint32 runtime·readgstatus(G*);
-void runtime·casgstatus(G*, uint32, uint32);
-bool runtime·castogscanstatus(G*, uint32, uint32);
-void runtime·quiesce(G*);
-bool runtime·stopg(G*);
-void runtime·restartg(G*);
-void runtime·gcphasework(G*);
-
-/*
- * deferred subroutine calls
- */
-struct Defer
-{
- int32 siz;
- bool started;
- uintptr argp; // where args were copied from
- uintptr pc;
- FuncVal* fn;
- Panic* panic; // panic that is running defer
- Defer* link;
-};
-
-// argp used in Defer structs when there is no argp.
-#define NoArgs ((uintptr)-1)
-
-/*
- * panics
- */
-struct Panic
-{
- void* argp; // pointer to arguments of deferred call run during panic; cannot move - known to liblink
- Eface arg; // argument to panic
- Panic* link; // link to earlier panic
- bool recovered; // whether this panic is over
- bool aborted; // the panic was aborted
-};
-
-/*
- * stack traces
- */
-typedef struct Stkframe Stkframe;
-typedef struct BitVector BitVector;
-struct Stkframe
-{
- Func* fn; // function being run
- uintptr pc; // program counter within fn
- uintptr continpc; // program counter where execution can continue, or 0 if not
- uintptr lr; // program counter at caller aka link register
- uintptr sp; // stack pointer at pc
- uintptr fp; // stack pointer at caller aka frame pointer
- uintptr varp; // top of local variables
- uintptr argp; // pointer to function arguments
- uintptr arglen; // number of bytes at argp
- BitVector* argmap; // force use of this argmap
-};
-
-enum
-{
- TraceRuntimeFrames = 1<<0, // include frames for internal runtime functions.
- TraceTrap = 1<<1, // the initial PC, SP are from a trap, not a return PC from a call
-};
-intgo runtime·gentraceback(uintptr, uintptr, uintptr, G*, intgo, uintptr*, intgo, bool(**)(Stkframe*, void*), void*, uintgo);
-void runtime·tracebackdefers(G*, bool(**)(Stkframe*, void*), void*);
-void runtime·traceback(uintptr pc, uintptr sp, uintptr lr, G* gp);
-void runtime·tracebacktrap(uintptr pc, uintptr sp, uintptr lr, G* gp);
-void runtime·tracebackothers(G*);
-bool runtime·haszeroargs(uintptr pc);
-bool runtime·topofstack(Func*);
-enum
-{
- // The maximum number of frames we print for a traceback
- TracebackMaxFrames = 100,
-};
-
-/*
- * external data
- */
-extern String runtime·emptystring;
-extern G** runtime·allg;
-extern Slice runtime·allgs; // []*G
-extern uintptr runtime·allglen;
-extern G* runtime·lastg;
-extern M* runtime·allm;
-extern P* runtime·allp[MaxGomaxprocs+1];
-extern int32 runtime·gomaxprocs;
-extern uint32 runtime·needextram;
-extern uint32 runtime·panicking;
-extern int8* runtime·goos;
-extern int32 runtime·ncpu;
-extern bool runtime·iscgo;
-extern void (*runtime·sysargs)(int32, uint8**);
-extern uintptr runtime·maxstring;
-extern uint32 runtime·cpuid_ecx;
-extern uint32 runtime·cpuid_edx;
-extern DebugVars runtime·debug;
-extern uintptr runtime·maxstacksize;
-extern Note runtime·signote;
-extern ForceGCState runtime·forcegc;
-extern SchedT runtime·sched;
-extern int32 runtime·newprocs;
-
-/*
- * common functions and data
- */
-int32 runtime·strcmp(byte*, byte*);
-int32 runtime·strncmp(byte*, byte*, uintptr);
-byte* runtime·strstr(byte*, byte*);
-intgo runtime·findnull(byte*);
-intgo runtime·findnullw(uint16*);
-void runtime·dump(byte*, int32);
-int32 runtime·runetochar(byte*, int32);
-int32 runtime·charntorune(int32*, uint8*, int32);
-
-
-/*
- * This macro is used when writing C functions
- * called as if they were Go functions.
- * Passed the address of a result before a return statement,
- * it makes sure the result has been flushed to memory
- * before the return.
- *
- * It is difficult to write such functions portably, because
- * of the varying requirements on the alignment of the
- * first output value. Almost all code should write such
- * functions in .goc files, where goc2c (part of cmd/dist)
- * can arrange the correct alignment for the target system.
- * Goc2c also takes care of conveying to the garbage collector
- * which parts of the argument list are inputs vs outputs.
- *
- * Therefore, do NOT use this macro if at all possible.
- */
-#define FLUSH(x) USED(x)
-
-/*
- * GoOutput is a type with the same alignment requirements as the
- * initial output argument from a Go function. Only for use in cases
- * where using goc2c is not possible. See comment on FLUSH above.
- */
-typedef uint64 GoOutput;
-
-void runtime·gogo(Gobuf*);
-void runtime·gostartcall(Gobuf*, void(*)(void), void*);
-void runtime·gostartcallfn(Gobuf*, FuncVal*);
-void runtime·gosave(Gobuf*);
-void runtime·goargs(void);
-void runtime·goenvs(void);
-void runtime·goenvs_unix(void);
-void* runtime·getu(void);
-void runtime·throw(int8*);
-bool runtime·canpanic(G*);
-void runtime·prints(int8*);
-void runtime·printf(int8*, ...);
-void runtime·snprintf(byte*, int32, int8*, ...);
-byte* runtime·mchr(byte*, byte, byte*);
-int32 runtime·mcmp(byte*, byte*, uintptr);
-void runtime·memmove(void*, void*, uintptr);
-String runtime·catstring(String, String);
-String runtime·gostring(byte*);
-Slice runtime·makeStringSlice(intgo);
-String runtime·gostringn(byte*, intgo);
-Slice runtime·gobytes(byte*, intgo);
-String runtime·gostringnocopy(byte*);
-String runtime·gostringw(uint16*);
-void runtime·initsig(void);
-void runtime·sigenable(uint32 sig);
-void runtime·sigdisable(uint32 sig);
-int32 runtime·gotraceback(bool *crash);
-void runtime·goroutineheader(G*);
-int32 runtime·open(int8*, int32, int32);
-int32 runtime·read(int32, void*, int32);
-int32 runtime·write(uintptr, void*, int32); // use uintptr to accommodate windows.
-int32 runtime·close(int32);
-int32 runtime·mincore(void*, uintptr, byte*);
-void runtime·jmpdefer(FuncVal*, uintptr);
-void runtime·exit1(int32);
-void runtime·ready(G*);
-byte* runtime·getenv(int8*);
-int32 runtime·atoi(byte*);
-void runtime·newosproc(M *mp, void *stk);
-void runtime·mstart(void);
-G* runtime·malg(int32);
-void runtime·asminit(void);
-void runtime·mpreinit(M*);
-void runtime·minit(void);
-void runtime·unminit(void);
-void runtime·signalstack(byte*, int32);
-void runtime·tracebackinit(void);
-void runtime·symtabinit(void);
-Func* runtime·findfunc(uintptr);
-int32 runtime·funcline(Func*, uintptr, String*);
-int32 runtime·funcspdelta(Func*, uintptr);
-int8* runtime·funcname(Func*);
-int32 runtime·pcdatavalue(Func*, int32, uintptr);
-void runtime·stackinit(void);
-Stack runtime·stackalloc(uint32);
-void runtime·stackfree(Stack);
-void runtime·shrinkstack(G*);
-void runtime·shrinkfinish(void);
-MCache* runtime·allocmcache(void);
-void runtime·freemcache(MCache*);
-void runtime·mallocinit(void);
-void runtime·gcinit(void);
-void* runtime·mallocgc(uintptr size, Type* typ, uint32 flag);
-void runtime·runpanic(Panic*);
-uintptr runtime·getcallersp(void*);
-int32 runtime·mcount(void);
-int32 runtime·gcount(void);
-void runtime·mcall(void(**)(G*));
-void runtime·onM(void(**)(void));
-void runtime·onMsignal(void(**)(void));
-uint32 runtime·fastrand1(void);
-void runtime·rewindmorestack(Gobuf*);
-int32 runtime·timediv(int64, int32, int32*);
-int32 runtime·round2(int32 x); // round x up to a power of 2.
-
-// atomic operations
-bool runtime·cas(uint32*, uint32, uint32);
-bool runtime·cas64(uint64*, uint64, uint64);
-bool runtime·casp(void**, void*, void*);
-bool runtime·casuintptr(uintptr*, uintptr, uintptr);
-// Don't confuse with XADD x86 instruction,
-// this one is actually 'addx', that is, add-and-fetch.
-uint32 runtime·xadd(uint32 volatile*, int32);
-uint64 runtime·xadd64(uint64 volatile*, int64);
-uint32 runtime·xchg(uint32 volatile*, uint32);
-uint64 runtime·xchg64(uint64 volatile*, uint64);
-void* runtime·xchgp(void* volatile*, void*);
-uint32 runtime·atomicload(uint32 volatile*);
-void runtime·atomicstore(uint32 volatile*, uint32);
-void runtime·atomicstore64(uint64 volatile*, uint64);
-uint64 runtime·atomicload64(uint64 volatile*);
-void* runtime·atomicloadp(void* volatile*);
-uintptr runtime·atomicloaduintptr(uintptr volatile*);
-void runtime·atomicstorep(void* volatile*, void*);
-void runtime·atomicstoreuintptr(uintptr volatile*, uintptr);
-void runtime·atomicor8(byte volatile*, byte);
-
-void runtime·setg(G*);
-void runtime·newextram(void);
-void runtime·exit(int32);
-void runtime·breakpoint(void);
-void runtime·gosched_m(G*);
-void runtime·schedtrace(bool);
-void runtime·park(bool(*)(G*, void*), void*, String);
-void runtime·parkunlock(Mutex*, String);
-void runtime·tsleep(int64, String);
-M* runtime·newm(void);
-void runtime·goexit(void);
-void runtime·asmcgocall(void (*fn)(void*), void*);
-int32 runtime·asmcgocall_errno(void (*fn)(void*), void*);
-void runtime·entersyscall(void);
-void runtime·reentersyscall(uintptr, uintptr);
-void runtime·entersyscallblock(void);
-void runtime·exitsyscall(void);
-G* runtime·newproc1(FuncVal*, byte*, int32, int32, void*);
-bool runtime·sigsend(int32 sig);
-intgo runtime·callers(intgo, uintptr*, intgo);
-intgo runtime·gcallers(G*, intgo, uintptr*, intgo);
-int64 runtime·nanotime(void); // monotonic time
-int64 runtime·unixnanotime(void); // real time, can skip
-void runtime·dopanic(int32);
-void runtime·startpanic(void);
-void runtime·freezetheworld(void);
-void runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp);
-void runtime·resetcpuprofiler(int32);
-void runtime·setcpuprofilerate(int32);
-void runtime·usleep(uint32);
-int64 runtime·cputicks(void);
-int64 runtime·tickspersecond(void);
-void runtime·blockevent(int64, intgo);
-G* runtime·netpoll(bool);
-void runtime·netpollready(G**, PollDesc*, int32);
-uintptr runtime·netpollfd(PollDesc*);
-void** runtime·netpolluser(PollDesc*);
-bool runtime·netpollclosing(PollDesc*);
-void runtime·netpolllock(PollDesc*);
-void runtime·netpollunlock(PollDesc*);
-void runtime·crash(void);
-void runtime·parsedebugvars(void);
-void* runtime·funcdata(Func*, int32);
-void runtime·setmaxthreads_m(void);
-G* runtime·timejump(void);
-void runtime·iterate_itabs(void (**callback)(Itab*));
-void runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*));
-
-#pragma varargck argpos runtime·printf 1
-#pragma varargck type "c" int32
-#pragma varargck type "d" int32
-#pragma varargck type "d" uint32
-#pragma varargck type "D" int64
-#pragma varargck type "D" uint64
-#pragma varargck type "x" int32
-#pragma varargck type "x" uint32
-#pragma varargck type "X" int64
-#pragma varargck type "X" uint64
-#pragma varargck type "p" void*
-#pragma varargck type "p" uintptr
-#pragma varargck type "s" int8*
-#pragma varargck type "s" uint8*
-#pragma varargck type "S" String
-
-void runtime·stoptheworld(void);
-void runtime·starttheworld(void);
-extern uint32 runtime·worldsema;
-
-/*
- * mutual exclusion locks. in the uncontended case,
- * as fast as spin locks (just a few user-level instructions),
- * but on the contention path they sleep in the kernel.
- * a zeroed Mutex is unlocked (no need to initialize each lock).
- */
-void runtime·lock(Mutex*);
-void runtime·unlock(Mutex*);
-
-/*
- * sleep and wakeup on one-time events.
- * before any calls to notesleep or notewakeup,
- * must call noteclear to initialize the Note.
- * then, exactly one thread can call notesleep
- * and exactly one thread can call notewakeup (once).
- * once notewakeup has been called, the notesleep
- * will return. future notesleep will return immediately.
- * subsequent noteclear must be called only after
- * previous notesleep has returned, e.g. it's disallowed
- * to call noteclear straight after notewakeup.
- *
- * notetsleep is like notesleep but wakes up after
- * a given number of nanoseconds even if the event
- * has not yet happened. if a goroutine uses notetsleep to
- * wake up early, it must wait to call noteclear until it
- * can be sure that no other goroutine is calling
- * notewakeup.
- *
- * notesleep/notetsleep are generally called on g0,
- * notetsleepg is similar to notetsleep but is called on user g.
- */
-void runtime·noteclear(Note*);
-void runtime·notesleep(Note*);
-void runtime·notewakeup(Note*);
-bool runtime·notetsleep(Note*, int64); // false - timeout
-bool runtime·notetsleepg(Note*, int64); // false - timeout
-
-/*
- * low-level synchronization for implementing the above
- */
-uintptr runtime·semacreate(void);
-int32 runtime·semasleep(int64);
-void runtime·semawakeup(M*);
-// or
-void runtime·futexsleep(uint32*, uint32, int64);
-void runtime·futexwakeup(uint32*, uint32);
-
-/*
- * Mutex-free stack.
- * Initialize uint64 head to 0, compare with 0 to test for emptiness.
- * The stack does not keep pointers to nodes,
- * so they can be garbage collected if there are no other pointers to nodes.
- */
-void runtime·lfstackpush(uint64 *head, LFNode *node);
-LFNode* runtime·lfstackpop(uint64 *head);
-
-/*
- * Parallel for over [0, n).
- * body() is executed for each iteration.
- * nthr - total number of worker threads.
- * ctx - arbitrary user context.
- * if wait=true, threads return from parfor() when all work is done;
- * otherwise, threads can return while other threads are still finishing processing.
- */
-ParFor* runtime·parforalloc(uint32 nthrmax);
-void runtime·parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32));
-void runtime·parfordo(ParFor *desc);
-void runtime·parforiters(ParFor*, uintptr, uintptr*, uintptr*);
-
-/*
- * low level C-called
- */
-// for mmap, we only pass the lower 32 bits of file offset to the
-// assembly routine; the higher bits (if required), should be provided
-// by the assembly routine as 0.
-uint8* runtime·mmap(byte*, uintptr, int32, int32, int32, uint32);
-void runtime·munmap(byte*, uintptr);
-void runtime·madvise(byte*, uintptr, int32);
-void runtime·memclr(byte*, uintptr);
-void runtime·setcallerpc(void*, void*);
-void* runtime·getcallerpc(void*);
-void runtime·printbool(bool);
-void runtime·printbyte(int8);
-void runtime·printfloat(float64);
-void runtime·printint(int64);
-void runtime·printiface(Iface);
-void runtime·printeface(Eface);
-void runtime·printstring(String);
-void runtime·printpc(void*);
-void runtime·printpointer(void*);
-void runtime·printuint(uint64);
-void runtime·printhex(uint64);
-void runtime·printslice(Slice);
-void runtime·printcomplex(Complex128);
-
-/*
- * runtime go-called
- */
-void runtime·gopanic(Eface);
-void runtime·panicindex(void);
-void runtime·panicslice(void);
-void runtime·panicdivide(void);
-
-/*
- * runtime c-called (but written in Go)
- */
-void runtime·printany(Eface);
-void runtime·newTypeAssertionError(String*, String*, String*, String*, Eface*);
-void runtime·fadd64c(uint64, uint64, uint64*);
-void runtime·fsub64c(uint64, uint64, uint64*);
-void runtime·fmul64c(uint64, uint64, uint64*);
-void runtime·fdiv64c(uint64, uint64, uint64*);
-void runtime·fneg64c(uint64, uint64*);
-void runtime·f32to64c(uint32, uint64*);
-void runtime·f64to32c(uint64, uint32*);
-void runtime·fcmp64c(uint64, uint64, int32*, bool*);
-void runtime·fintto64c(int64, uint64*);
-void runtime·f64tointc(uint64, int64*, bool*);
-
-/*
- * wrapped for go users
- */
-float64 runtime·Inf(int32 sign);
-float64 runtime·NaN(void);
-float32 runtime·float32frombits(uint32 i);
-uint32 runtime·float32tobits(float32 f);
-float64 runtime·float64frombits(uint64 i);
-uint64 runtime·float64tobits(float64 f);
-float64 runtime·frexp(float64 d, int32 *ep);
-bool runtime·isInf(float64 f, int32 sign);
-bool runtime·isNaN(float64 f);
-float64 runtime·ldexp(float64 d, int32 e);
-float64 runtime·modf(float64 d, float64 *ip);
-void runtime·semacquire(uint32*, bool);
-void runtime·semrelease(uint32*);
-int32 runtime·gomaxprocsfunc(int32 n);
-void runtime·procyield(uint32);
-void runtime·osyield(void);
-void runtime·lockOSThread(void);
-void runtime·unlockOSThread(void);
-
-void runtime·writebarrierptr_nostore(void*, void*);
-
-bool runtime·showframe(Func*, G*);
-void runtime·printcreatedby(G*);
-
-void runtime·ifaceE2I(InterfaceType*, Eface, Iface*);
-bool runtime·ifaceE2I2(InterfaceType*, Eface, Iface*);
-uintptr runtime·memlimit(void);
-
-// float.c
-extern float64 runtime·nan;
-extern float64 runtime·posinf;
-extern float64 runtime·neginf;
-extern uint64 ·nan;
-extern uint64 ·posinf;
-extern uint64 ·neginf;
-#define ISNAN(f) ((f) != (f))
-
-enum
-{
- UseSpanType = 1,
-};
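
One detail from the deleted header worth keeping in mind when reading the Go replacements: the G status values encode "the GC is scanning this stack" by adding a Gscan bit on top of the base status, so masking with &^ Gscan recovers the non-scan state. A standalone sketch of that encoding (the constant names mirror the deleted enum and are not the runtime's actual declarations):

	package main

	import "fmt"

	const (
		gIdle     = iota // 0
		gRunnable        // 1
		gRunning         // 2
		gSyscall         // 3
		gWaiting         // 4

		gScan         = 0x1000
		gScanRunnable = gScan + gRunnable // 0x1001
		gScanRunning  = gScan + gRunning  // 0x1002
		gScanSyscall  = gScan + gSyscall  // 0x1003
		gScanWaiting  = gScan + gWaiting  // 0x1004
	)

	func main() {
		status := gScanRunnable
		fmt.Printf("scanning: %v, base status: %#x\n",
			status&gScan != 0, status&^gScan) // scanning: true, base status: 0x1
	}
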
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
new file mode 100644
index 000000000..15dea01a3
--- /dev/null
+++ b/src/runtime/runtime1.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Keep a cached value to make gotraceback fast,
+// since we call it on every call to gentraceback.
+// The cached value is a uint32 in which the low bit
+// is the "crash" setting and the top 31 bits are the
+// gotraceback value.
+var traceback_cache uint32 = 2 << 1
+
+// The GOTRACEBACK environment variable controls the
+// behavior of a Go program that is crashing and exiting.
+// GOTRACEBACK=0 suppress all tracebacks
+// GOTRACEBACK=1 default behavior - show tracebacks but exclude runtime frames
+// GOTRACEBACK=2 show tracebacks including runtime frames
+// GOTRACEBACK=crash show tracebacks including runtime frames, then crash (core dump etc)
+//go:nosplit
+func gotraceback(crash *bool) int32 {
+ _g_ := getg()
+ if crash != nil {
+ *crash = false
+ }
+ if _g_.m.traceback != 0 {
+ return int32(_g_.m.traceback)
+ }
+ if crash != nil {
+ *crash = traceback_cache&1 != 0
+ }
+ return int32(traceback_cache >> 1)
+}
+
+var (
+ argc int32
+ argv **byte
+)
+
+// nosplit for use in linux/386 startup code (linux_setup_vdso)
+//go:nosplit
+func argv_index(argv **byte, i int32) *byte {
+ return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*ptrSize))
+}
+
+func args(c int32, v **byte) {
+ argc = c
+ argv = v
+ sysargs(c, v)
+}
+
+var (
+ // TODO: Retire in favor of GOOS== checks.
+ isplan9 int32
+ issolaris int32
+ iswindows int32
+)
+
+// Information about what cpu features are available.
+// Set on startup in asm_{x86/amd64}.s.
+var (
+//cpuid_ecx uint32
+//cpuid_edx uint32
+)
+
+func goargs() {
+ if GOOS == "windows" {
+ return
+ }
+
+ argslice = make([]string, argc)
+ for i := int32(0); i < argc; i++ {
+ argslice[i] = gostringnocopy(argv_index(argv, i))
+ }
+}
+
+func goenvs_unix() {
+ n := int32(0)
+ for argv_index(argv, argc+1+n) != nil {
+ n++
+ }
+
+ envs = make([]string, n)
+ for i := int32(0); i < n; i++ {
+ envs[i] = gostringnocopy(argv_index(argv, argc+1+i))
+ }
+}
+
+func environ() []string {
+ return envs
+}
+
+func testAtomic64() {
+ var z64, x64 uint64
+
+ z64 = 42
+ x64 = 0
+ // TODO: PREFETCH((unsafe.Pointer)(&z64))
+ if cas64(&z64, x64, 1) {
+ gothrow("cas64 failed")
+ }
+ if x64 != 0 {
+ gothrow("cas64 failed")
+ }
+ x64 = 42
+ if !cas64(&z64, x64, 1) {
+ gothrow("cas64 failed")
+ }
+ if x64 != 42 || z64 != 1 {
+ gothrow("cas64 failed")
+ }
+ if atomicload64(&z64) != 1 {
+ gothrow("load64 failed")
+ }
+ atomicstore64(&z64, (1<<40)+1)
+ if atomicload64(&z64) != (1<<40)+1 {
+ gothrow("store64 failed")
+ }
+ if xadd64(&z64, (1<<40)+1) != (2<<40)+2 {
+ gothrow("xadd64 failed")
+ }
+ if atomicload64(&z64) != (2<<40)+2 {
+ gothrow("xadd64 failed")
+ }
+ if xchg64(&z64, (3<<40)+3) != (2<<40)+2 {
+ gothrow("xchg64 failed")
+ }
+ if atomicload64(&z64) != (3<<40)+3 {
+ gothrow("xchg64 failed")
+ }
+}
+
+func check() {
+ var (
+ a int8
+ b uint8
+ c int16
+ d uint16
+ e int32
+ f uint32
+ g int64
+ h uint64
+ i, i1 float32
+ j, j1 float64
+ k, k1 unsafe.Pointer
+ l *uint16
+ m [4]byte
+ )
+ type x1t struct {
+ x uint8
+ }
+ type y1t struct {
+ x1 x1t
+ y uint8
+ }
+ var x1 x1t
+ var y1 y1t
+
+ if unsafe.Sizeof(a) != 1 {
+ gothrow("bad a")
+ }
+ if unsafe.Sizeof(b) != 1 {
+ gothrow("bad b")
+ }
+ if unsafe.Sizeof(c) != 2 {
+ gothrow("bad c")
+ }
+ if unsafe.Sizeof(d) != 2 {
+ gothrow("bad d")
+ }
+ if unsafe.Sizeof(e) != 4 {
+ gothrow("bad e")
+ }
+ if unsafe.Sizeof(f) != 4 {
+ gothrow("bad f")
+ }
+ if unsafe.Sizeof(g) != 8 {
+ gothrow("bad g")
+ }
+ if unsafe.Sizeof(h) != 8 {
+ gothrow("bad h")
+ }
+ if unsafe.Sizeof(i) != 4 {
+ gothrow("bad i")
+ }
+ if unsafe.Sizeof(j) != 8 {
+ gothrow("bad j")
+ }
+ if unsafe.Sizeof(k) != ptrSize {
+ gothrow("bad k")
+ }
+ if unsafe.Sizeof(l) != ptrSize {
+ gothrow("bad l")
+ }
+ if unsafe.Sizeof(x1) != 1 {
+ gothrow("bad unsafe.Sizeof x1")
+ }
+ if unsafe.Offsetof(y1.y) != 1 {
+ gothrow("bad offsetof y1.y")
+ }
+ if unsafe.Sizeof(y1) != 2 {
+ gothrow("bad unsafe.Sizeof y1")
+ }
+
+ if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
+ gothrow("bad timediv")
+ }
+
+ var z uint32
+ z = 1
+ if !cas(&z, 1, 2) {
+ gothrow("cas1")
+ }
+ if z != 2 {
+ gothrow("cas2")
+ }
+
+ z = 4
+ if cas(&z, 5, 6) {
+ gothrow("cas3")
+ }
+ if z != 4 {
+ gothrow("cas4")
+ }
+
+ z = 0xffffffff
+ if !cas(&z, 0xffffffff, 0xfffffffe) {
+ gothrow("cas5")
+ }
+ if z != 0xfffffffe {
+ gothrow("cas6")
+ }
+
+ k = unsafe.Pointer(uintptr(0xfedcb123))
+ if ptrSize == 8 {
+ k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
+ }
+ if casp(&k, nil, nil) {
+ gothrow("casp1")
+ }
+ k1 = add(k, 1)
+ if !casp(&k, k, k1) {
+ gothrow("casp2")
+ }
+ if k != k1 {
+ gothrow("casp3")
+ }
+
+ m = [4]byte{1, 1, 1, 1}
+ atomicor8(&m[1], 0xf0)
+ if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
+ gothrow("atomicor8")
+ }
+
+ *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
+ if j == j {
+ gothrow("float64nan")
+ }
+ if !(j != j) {
+ gothrow("float64nan1")
+ }
+
+ *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
+ if j == j1 {
+ gothrow("float64nan2")
+ }
+ if !(j != j1) {
+ gothrow("float64nan3")
+ }
+
+ *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
+ if i == i {
+ gothrow("float32nan")
+ }
+ if i == i {
+ gothrow("float32nan1")
+ }
+
+ *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
+ if i == i1 {
+ gothrow("float32nan2")
+ }
+ if i == i1 {
+ gothrow("float32nan3")
+ }
+
+ testAtomic64()
+
+ if _FixedStack != round2(_FixedStack) {
+ gothrow("FixedStack is not power-of-2")
+ }
+}
+
+type dbgVar struct {
+ name string
+ value *int32
+}
+
+// Do we report invalid pointers found during stack or heap scans?
+//var invalidptr int32 = 1
+
+var dbgvars = []dbgVar{
+ {"allocfreetrace", &debug.allocfreetrace},
+ {"invalidptr", &invalidptr},
+ {"efence", &debug.efence},
+ {"gctrace", &debug.gctrace},
+ {"gcdead", &debug.gcdead},
+ {"scheddetail", &debug.scheddetail},
+ {"schedtrace", &debug.schedtrace},
+ {"scavenge", &debug.scavenge},
+}
+
+func parsedebugvars() {
+ for p := gogetenv("GODEBUG"); p != ""; {
+ field := ""
+ i := index(p, ",")
+ if i < 0 {
+ field, p = p, ""
+ } else {
+ field, p = p[:i], p[i+1:]
+ }
+ i = index(field, "=")
+ if i < 0 {
+ continue
+ }
+ key, value := field[:i], field[i+1:]
+ for _, v := range dbgvars {
+ if v.name == key {
+ *v.value = int32(goatoi(value))
+ }
+ }
+ }
+
+ switch p := gogetenv("GOTRACEBACK"); p {
+ case "":
+ traceback_cache = 1 << 1
+ case "crash":
+ traceback_cache = 2<<1 | 1
+ default:
+ traceback_cache = uint32(goatoi(p)) << 1
+ }
+}
+
+// Poor man's 64-bit division.
+// This is a very special function; do not use it unless you are sure what you are doing.
+// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
+// Handles overflow in a time-specific manner.
+//go:nosplit
+func timediv(v int64, div int32, rem *int32) int32 {
+ res := int32(0)
+ for bit := 30; bit >= 0; bit-- {
+ if v >= int64(div)<<uint(bit) {
+ v = v - (int64(div) << uint(bit))
+ res += 1 << uint(bit)
+ }
+ }
+ if v >= int64(div) {
+ if rem != nil {
+ *rem = 0
+ }
+ return 0x7fffffff
+ }
+ if rem != nil {
+ *rem = int32(v)
+ }
+ return res
+}
+
+// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
+
+//go:nosplit
+func acquirem() *m {
+ _g_ := getg()
+ _g_.m.locks++
+ return _g_.m
+}
+
+//go:nosplit
+func releasem(mp *m) {
+ _g_ := getg()
+ mp.locks--
+ if mp.locks == 0 && _g_.preempt {
+ // restore the preemption request in case we've cleared it in newstack
+ _g_.stackguard0 = stackPreempt
+ }
+}
+
+//go:nosplit
+func gomcache() *mcache {
+ return getg().m.mcache
+}
+
+var typelink, etypelink [0]byte
+
+//go:nosplit
+func typelinks() []*_type {
+ var ret []*_type
+ sp := (*slice)(unsafe.Pointer(&ret))
+ sp.array = (*byte)(unsafe.Pointer(&typelink))
+ sp.len = uint((uintptr(unsafe.Pointer(&etypelink)) - uintptr(unsafe.Pointer(&typelink))) / unsafe.Sizeof(ret[0]))
+ sp.cap = sp.len
+ return ret
+}
+
+// TODO: move back into mgc0.c when converted to Go
+func readgogc() int32 {
+ p := gogetenv("GOGC")
+ if p == "" {
+ return 100
+ }
+ if p == "off" {
+ return -1
+ }
+ return int32(goatoi(p))
+}
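
Editor's note: the new runtime1.go packs the GOTRACEBACK setting into a single uint32, traceback_cache, whose low bit records the "crash" setting and whose upper 31 bits record the traceback level; gotraceback unpacks that word on every call. The following is a minimal, standalone sketch of that encoding outside the runtime. pack and unpack are hypothetical names for illustration, and strconv.Atoi stands in for the runtime's goatoi; this is not runtime code.

package main

import (
	"fmt"
	"strconv"
)

// pack mirrors the switch at the end of parsedebugvars: "" means level 1,
// "crash" means level 2 plus the crash bit, anything else is parsed as a number.
func pack(val string) uint32 {
	switch val {
	case "":
		return 1 << 1
	case "crash":
		return 2<<1 | 1
	default:
		n, _ := strconv.Atoi(val) // stand-in for the runtime's goatoi
		return uint32(n) << 1
	}
}

// unpack mirrors gotraceback: the low bit is "crash", the rest is the level.
func unpack(cache uint32) (level int32, crash bool) {
	return int32(cache >> 1), cache&1 != 0
}

func main() {
	for _, v := range []string{"", "0", "2", "crash"} {
		level, crash := unpack(pack(v))
		fmt.Printf("GOTRACEBACK=%q -> level=%d crash=%v\n", v, level, crash)
	}
}
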
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
new file mode 100644
index 000000000..7625a2dd8
--- /dev/null
+++ b/src/runtime/runtime2.go
@@ -0,0 +1,613 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+/*
+ * defined constants
+ */
+const (
+ // G status
+ //
+ // If you add to this list, add to the list
+ // of "okay during garbage collection" status
+ // in mgc0.c too.
+ _Gidle = iota // 0
+ _Grunnable // 1 runnable and on a run queue
+ _Grunning // 2
+ _Gsyscall // 3
+ _Gwaiting // 4
+ _Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts
+ _Gdead // 6
+ _Genqueue // 7 only used in combination with _Gscan (as _Gscanenqueue)
+ _Gcopystack // 8 in this state when newstack is moving the stack
+ // the following encode that the GC is scanning the stack and what to do when it is done
+ _Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
+ // _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
+ _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make it Grunnable (it is already on run queue)
+ _Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
+ _Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
+ _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
+ // _Gscanmoribund_unused, // not possible
+ // _Gscandead, // not possible
+ _Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
+)
+
+const (
+ // P status
+ _Pidle = iota
+ _Prunning
+ _Psyscall
+ _Pgcstop
+ _Pdead
+)
+
+// XXX inserting below here
+
+type mutex struct {
+ // Futex-based impl treats it as uint32 key,
+ // while sema-based impl as M* waitm.
+ // Used to be a union, but unions break precise GC.
+ key uintptr
+}
+
+type note struct {
+ // Futex-based impl treats it as uint32 key,
+ // while sema-based impl as M* waitm.
+ // Used to be a union, but unions break precise GC.
+ key uintptr
+}
+
+type _string struct {
+ str *byte
+ len int
+}
+
+type funcval struct {
+ fn uintptr
+ // variable-size, fn-specific data here
+}
+
+type iface struct {
+ tab *itab
+ data unsafe.Pointer
+}
+
+type eface struct {
+ _type *_type
+ data unsafe.Pointer
+}
+
+type slice struct {
+ array *byte // actual data
+ len uint // number of elements
+ cap uint // allocated number of elements
+}
+
+type gobuf struct {
+ // The offsets of sp, pc, and g are known to (hard-coded in) libmach.
+ sp uintptr
+ pc uintptr
+ g *g
+ ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
+ ret uintreg
+ lr uintptr
+}
+
+// Known to compiler.
+// Changes here must also be made in src/cmd/gc/select.c's selecttype.
+type sudog struct {
+ g *g
+ selectdone *uint32
+ next *sudog
+ prev *sudog
+ elem unsafe.Pointer // data element
+ releasetime int64
+ nrelease int32 // -1 for acquire
+ waitlink *sudog // g.waiting list
+}
+
+type gcstats struct {
+ // the struct must consist of only uint64's,
+ // because it is cast to uint64[].
+ nhandoff uint64
+ nhandoffcnt uint64
+ nprocyield uint64
+ nosyield uint64
+ nsleep uint64
+}
+
+type libcall struct {
+ fn uintptr
+ n uintptr // number of parameters
+ args uintptr // parameters
+ r1 uintptr // return values
+ r2 uintptr
+ err uintptr // error number
+}
+
+// describes how to handle callback
+type wincallbackcontext struct {
+ gobody unsafe.Pointer // go function to call
+ argsize uintptr // callback arguments size (in bytes)
+ restorestack uintptr // adjust stack on return by (in bytes) (386 only)
+ cleanstack bool
+}
+
+// Stack describes a Go execution stack.
+// The bounds of the stack are exactly [lo, hi),
+// with no implicit data structures on either side.
+type stack struct {
+ lo uintptr
+ hi uintptr
+}
+
+type g struct {
+ // Stack parameters.
+ // stack describes the actual stack memory: [stack.lo, stack.hi).
+ // stackguard0 is the stack pointer compared in the Go stack growth prologue.
+ // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
+ // stackguard1 is the stack pointer compared in the C stack growth prologue.
+ // It is stack.lo+StackGuard on g0 and gsignal stacks.
+ // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
+ stack stack // offset known to runtime/cgo
+ stackguard0 uintptr // offset known to liblink
+ stackguard1 uintptr // offset known to liblink
+
+ _panic *_panic // innermost panic - offset known to liblink
+ _defer *_defer // innermost defer
+ sched gobuf
+ syscallsp uintptr // if status==gsyscall, syscallsp = sched.sp to use during gc
+ syscallpc uintptr // if status==gsyscall, syscallpc = sched.pc to use during gc
+ param unsafe.Pointer // passed parameter on wakeup
+ atomicstatus uint32
+ goid int64
+ waitsince int64 // approx time when the g become blocked
+ waitreason string // if status==gwaiting
+ schedlink *g
+ issystem bool // do not output in stack dump, ignore in deadlock detector
+ preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
+ paniconfault bool // panic (instead of crash) on unexpected fault address
+ preemptscan bool // preempted g does scan for gc
+ gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
+ throwsplit bool // must not split stack
+ raceignore int8 // ignore race detection events
+ m *m // for debuggers, but offset not hard-coded
+ lockedm *m
+ sig uint32
+ writebuf []byte
+ sigcode0 uintptr
+ sigcode1 uintptr
+ sigpc uintptr
+ gopc uintptr // pc of go statement that created this goroutine
+ racectx uintptr
+ waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr)
+ end [0]byte
+}
+
+type mts struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+type mscratch struct {
+ v [6]uintptr
+}
+
+type m struct {
+ g0 *g // goroutine with scheduling stack
+ morebuf gobuf // gobuf arg to morestack
+
+ // Fields not known to debuggers.
+ procid uint64 // for debuggers, but offset not hard-coded
+ gsignal *g // signal-handling g
+ tls [4]uintptr // thread-local storage (for x86 extern register)
+ mstartfn unsafe.Pointer // todo go func()
+ curg *g // current running goroutine
+ caughtsig *g // goroutine running during fatal signal
+ p *p // attached p for executing go code (nil if not executing go code)
+ nextp *p
+ id int32
+ mallocing int32
+ throwing int32
+ gcing int32
+ locks int32
+ softfloat int32
+ dying int32
+ profilehz int32
+ helpgc int32
+ spinning bool // m is out of work and is actively looking for work
+ blocked bool // m is blocked on a note
+ inwb bool // m is executing a write barrier
+ printlock int8
+ fastrand uint32
+ ncgocall uint64 // number of cgo calls in total
+ ncgo int32 // number of cgo calls currently in progress
+ cgomal *cgomal
+ park note
+ alllink *m // on allm
+ schedlink *m
+ machport uint32 // return address for mach ipc (os x)
+ mcache *mcache
+ lockedg *g
+ createstack [32]uintptr // stack that created this thread.
+ freglo [16]uint32 // d[i] lsb and f[i]
+ freghi [16]uint32 // d[i] msb and f[i+16]
+ fflag uint32 // floating point compare flags
+ locked uint32 // tracking for lockosthread
+ nextwaitm *m // next m waiting for lock
+ waitsema uintptr // semaphore for parking on locks
+ waitsemacount uint32
+ waitsemalock uint32
+ gcstats gcstats
+ needextram bool
+ traceback uint8
+ waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
+ waitlock unsafe.Pointer
+ //#ifdef GOOS_windows
+ thread uintptr // thread handle
+ // these are here because they are too large to be on the stack
+ // of low-level NOSPLIT functions.
+ libcall libcall
+ libcallpc uintptr // for cpu profiler
+ libcallsp uintptr
+ libcallg *g
+ //#endif
+ //#ifdef GOOS_solaris
+ perrno *int32 // pointer to tls errno
+ // these are here because they are too large to be on the stack
+ // of low-level NOSPLIT functions.
+ //LibCall libcall;
+ ts mts
+ scratch mscratch
+ //#endif
+ //#ifdef GOOS_plan9
+ notesig *int8
+ errstr *byte
+ //#endif
+ end [0]byte
+}
+
+type p struct {
+ lock mutex
+
+ id int32
+ status uint32 // one of pidle/prunning/...
+ link *p
+ schedtick uint32 // incremented on every scheduler call
+ syscalltick uint32 // incremented on every system call
+ m *m // back-link to associated m (nil if idle)
+ mcache *mcache
+ deferpool [5]*_defer // pool of available defer structs of different sizes (see panic.c)
+
+ // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
+ goidcache uint64
+ goidcacheend uint64
+
+ // Queue of runnable goroutines.
+ runqhead uint32
+ runqtail uint32
+ runq [256]*g
+
+ // Available G's (status == Gdead)
+ gfree *g
+ gfreecnt int32
+
+ pad [64]byte
+}
+
+const (
+ // The max value of GOMAXPROCS.
+ // There are no fundamental restrictions on the value.
+ _MaxGomaxprocs = 1 << 8
+)
+
+type schedt struct {
+ lock mutex
+
+ goidgen uint64
+
+ midle *m // idle m's waiting for work
+ nmidle int32 // number of idle m's waiting for work
+ nmidlelocked int32 // number of locked m's waiting for work
+ mcount int32 // number of m's that have been created
+ maxmcount int32 // maximum number of m's allowed (or die)
+
+ pidle *p // idle p's
+ npidle uint32
+ nmspinning uint32
+
+ // Global runnable queue.
+ runqhead *g
+ runqtail *g
+ runqsize int32
+
+ // Global cache of dead G's.
+ gflock mutex
+ gfree *g
+ ngfree int32
+
+ gcwaiting uint32 // gc is waiting to run
+ stopwait int32
+ stopnote note
+ sysmonwait uint32
+ sysmonnote note
+ lastpoll uint64
+
+ profilehz int32 // cpu profiling rate
+}
+
+// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
+// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
+// External locks are not recursive; a second lock is silently ignored.
+// The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread
+// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
+// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
+// goroutine is holding the lock during the initialization phase.
+const (
+ _LockExternal = 1
+ _LockInternal = 2
+)
+
+type sigtabtt struct {
+ flags int32
+ name *int8
+}
+
+const (
+ _SigNotify = 1 << 0 // let signal.Notify have signal, even if from kernel
+ _SigKill = 1 << 1 // if signal.Notify doesn't take it, exit quietly
+ _SigThrow = 1 << 2 // if signal.Notify doesn't take it, exit loudly
+ _SigPanic = 1 << 3 // if the signal is from the kernel, panic
+ _SigDefault = 1 << 4 // if the signal isn't explicitly requested, don't monitor it
+ _SigHandling = 1 << 5 // our signal handler is registered
+ _SigIgnored = 1 << 6 // the signal was ignored before we registered for it
+ _SigGoExit = 1 << 7 // cause all runtime procs to exit (only used on Plan 9).
+)
+
+// Layout of in-memory per-function information prepared by linker
+// See http://golang.org/s/go12symtab.
+// Keep in sync with linker and with ../../libmach/sym.c
+// and with package debug/gosym and with symtab.go in package runtime.
+type _func struct {
+ entry uintptr // start pc
+ nameoff int32 // function name
+
+ args int32 // in/out args size
+ frame int32 // legacy frame size; use pcsp if possible
+
+ pcsp int32
+ pcfile int32
+ pcln int32
+ npcdata int32
+ nfuncdata int32
+}
+
+// layout of Itab known to compilers
+// allocated in non-garbage-collected memory
+type itab struct {
+ inter *interfacetype
+ _type *_type
+ link *itab
+ bad int32
+ unused int32
+ fun [0]uintptr
+}
+
+const (
+ // TODO: Generate in cmd/dist.
+ _NaCl = 0
+ _Windows = 0
+ _Solaris = 0
+ _Plan9 = 0
+)
+
+// Lock-free stack node.
+// Also known to export_test.go.
+type lfnode struct {
+ next uint64
+ pushcnt uintptr
+}
+
+// Parallel for descriptor.
+type parfor struct {
+ body unsafe.Pointer // go func(*parfor, uint32), executed for each element
+ done uint32 // number of idle threads
+ nthr uint32 // total number of threads
+ nthrmax uint32 // maximum number of threads
+ thrseq uint32 // thread id sequencer
+ cnt uint32 // iteration space [0, cnt)
+ ctx unsafe.Pointer // arbitrary user context
+ wait bool // if true, wait while all threads finish processing,
+ // otherwise parfor may return while other threads are still working
+ thr *parforthread // array of thread descriptors
+ pad uint32 // to align parforthread.pos for 64-bit atomic operations
+ // stats
+ nsteal uint64
+ nstealcnt uint64
+ nprocyield uint64
+ nosyield uint64
+ nsleep uint64
+}
+
+// Track memory allocated by code not written in Go during a cgo call,
+// so that the garbage collector can see them.
+type cgomal struct {
+ next *cgomal
+ alloc unsafe.Pointer
+}
+
+// Holds variables parsed from GODEBUG env var.
+type debugvars struct {
+ allocfreetrace int32
+ efence int32
+ gctrace int32
+ gcdead int32
+ scheddetail int32
+ schedtrace int32
+ scavenge int32
+}
+
+// Indicates to the write barrier and synchronization code which task to perform.
+const (
+ _GCoff = iota // GC not running, write barrier disabled
+ _GCquiesce // unused state
+ _GCstw // unused state
+ _GCscan // GC collecting roots into workbufs, write barrier disabled
+ _GCmark // GC marking from workbufs, write barrier ENABLED
+ _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED
+ _GCsweep // GC mark completed; sweeping in background, write barrier disabled
+)
+
+type forcegcstate struct {
+ lock mutex
+ g *g
+ idle uint32
+}
+
+var gcphase uint32
+
+/*
+ * known to compiler
+ */
+const (
+ _Structrnd = regSize
+)
+
+var startup_random_data *byte
+var startup_random_data_len uint32
+
+var invalidptr int32
+
+const (
+ // hashinit wants this many random bytes
+ _HashRandomBytes = 32
+)
+
+/*
+ * deferred subroutine calls
+ */
+type _defer struct {
+ siz int32
+ started bool
+ argp uintptr // where args were copied from
+ pc uintptr
+ fn *funcval
+ _panic *_panic // panic that is running defer
+ link *_defer
+}
+
+/*
+ * panics
+ */
+type _panic struct {
+ argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
+ arg interface{} // argument to panic
+ link *_panic // link to earlier panic
+ recovered bool // whether this panic is over
+ aborted bool // the panic was aborted
+}
+
+/*
+ * stack traces
+ */
+
+type stkframe struct {
+ fn *_func // function being run
+ pc uintptr // program counter within fn
+ continpc uintptr // program counter where execution can continue, or 0 if not
+ lr uintptr // program counter at caller aka link register
+ sp uintptr // stack pointer at pc
+ fp uintptr // stack pointer at caller aka frame pointer
+ varp uintptr // top of local variables
+ argp uintptr // pointer to function arguments
+ arglen uintptr // number of bytes at argp
+ argmap *bitvector // force use of this argmap
+}
+
+const (
+ _TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions.
+ _TraceTrap = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call
+)
+
+const (
+ // The maximum number of frames we print for a traceback
+ _TracebackMaxFrames = 100
+)
+
+var (
+ emptystring string
+ allg **g
+ allglen uintptr
+ lastg *g
+ allm *m
+ allp [_MaxGomaxprocs + 1]*p
+ gomaxprocs int32
+ needextram uint32
+ panicking uint32
+ goos *int8
+ ncpu int32
+ iscgo bool
+ cpuid_ecx uint32
+ cpuid_edx uint32
+ debug debugvars
+ signote note
+ forcegc forcegcstate
+ sched schedt
+ newprocs int32
+)
+
+/*
+ * mutual exclusion locks. in the uncontended case,
+ * as fast as spin locks (just a few user-level instructions),
+ * but on the contention path they sleep in the kernel.
+ * a zeroed Mutex is unlocked (no need to initialize each lock).
+ */
+
+/*
+ * sleep and wakeup on one-time events.
+ * before any calls to notesleep or notewakeup,
+ * must call noteclear to initialize the Note.
+ * then, exactly one thread can call notesleep
+ * and exactly one thread can call notewakeup (once).
+ * once notewakeup has been called, the notesleep
+ * will return. future notesleep will return immediately.
+ * subsequent noteclear must be called only after
+ * previous notesleep has returned, e.g. it's disallowed
+ * to call noteclear straight after notewakeup.
+ *
+ * notetsleep is like notesleep but wakes up after
+ * a given number of nanoseconds even if the event
+ * has not yet happened. if a goroutine uses notetsleep to
+ * wake up early, it must wait to call noteclear until it
+ * can be sure that no other goroutine is calling
+ * notewakeup.
+ *
+ * notesleep/notetsleep are generally called on g0,
+ * notetsleepg is similar to notetsleep but is called on user g.
+ */
+// bool runtime·notetsleep(Note*, int64); // false - timeout
+// bool runtime·notetsleepg(Note*, int64); // false - timeout
+
+/*
+ * Lock-free stack.
+ * Initialize uint64 head to 0, compare with 0 to test for emptiness.
+ * The stack does not keep pointers to nodes,
+ * so they can be garbage collected if there are no other pointers to nodes.
+ */
+
+/*
+ * Parallel for over [0, n).
+ * body() is executed for each iteration.
+ * nthr - total number of worker threads.
+ * ctx - arbitrary user context.
+ * if wait=true, threads return from parfor() when all work is done;
+ * otherwise, threads can return while other threads are still finishing processing.
+ */
+
+// For mmap, we only pass the lower 32 bits of the file offset to the
+// assembly routine; the higher bits (if required) should be provided
+// by the assembly routine as 0.
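
Editor's note: the G status constants at the top of runtime2.go encode "this G is being scanned" by adding the _Gscan bit (0x1000) to the base state, so atomicstatus &^ _Gscan recovers the non-scan state, as the comment on _Gscan says. Below is a minimal sketch of that arithmetic; the constant values are copied from the block above purely for illustration, and this is not runtime code.

package main

import "fmt"

// Values copied from the status block in runtime2.go, for illustration only.
const (
	_Grunnable = 1
	_Gwaiting  = 4
	_Gscan     = 0x1000

	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)

func main() {
	for _, st := range []int{_Gscanrunnable, _Gscanwaiting} {
		base := st &^ _Gscan // strip the scan bit to recover the underlying state
		fmt.Printf("status %#x -> scanning=%v base=%#x\n", st, st&_Gscan != 0, base)
	}
}
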
diff --git a/src/runtime/runtime2_windows.go b/src/runtime/runtime2_windows.go
new file mode 100644
index 000000000..80fc386e9
--- /dev/null
+++ b/src/runtime/runtime2_windows.go
@@ -0,0 +1,8 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// TODO(brainman): move generation of zsys_windows_*.s out from cmd/dist/buildruntime.c and into here
+const cb_max = 2000 // maximum number of windows callbacks allowed (must be in sync with MAXWINCB from cmd/dist/buildruntime.c)
diff --git a/src/runtime/select.go b/src/runtime/select.go
index d703e1d79..e918b734a 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -167,8 +167,8 @@ func selunlock(sel *_select) {
}
}
-func selparkcommit(gp *g, sel *_select) bool {
- selunlock(sel)
+func selparkcommit(gp *g, sel unsafe.Pointer) bool {
+ selunlock((*_select)(sel))
return true
}
@@ -363,7 +363,7 @@ loop:
// wait for someone to wake us up
gp.param = nil
- gopark(unsafe.Pointer(funcPC(selparkcommit)), unsafe.Pointer(sel), "select")
+ gopark(selparkcommit, unsafe.Pointer(sel), "select")
// someone woke us up
sellock(sel)
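
Editor's note: the select.go hunk above switches gopark's commit callback from a funcPC value to a plain Go func, so selparkcommit now receives its lock as an unsafe.Pointer and converts it back to *_select itself. The following is a minimal standalone sketch of that type-erased-callback shape; toySelect, unlockToy, and commit are hypothetical names for illustration, not runtime APIs.

package main

import (
	"fmt"
	"unsafe"
)

// toySelect stands in for the runtime's _select.
type toySelect struct{ id int }

// unlockToy stands in for selunlock.
func unlockToy(s *toySelect) { fmt.Println("unlocked select", s.id) }

// commit mirrors the shape of the new selparkcommit: it receives the lock as an
// unsafe.Pointer and converts it back to the concrete type before using it.
func commit(p unsafe.Pointer) bool {
	unlockToy((*toySelect)(p))
	return true
}

func main() {
	s := &toySelect{id: 1}
	// The caller erases the type, much as gopark does with its waitlock argument.
	commit(unsafe.Pointer(s))
}
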
diff --git a/src/runtime/signal.c b/src/runtime/signal.c
deleted file mode 100644
index 0674bfb22..000000000
--- a/src/runtime/signal.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-
-void
-runtime·sigenable_m(void)
-{
- uint32 s;
-
- s = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
- runtime·sigenable(s);
-}
-
-void
-runtime·sigdisable_m(void)
-{
- uint32 s;
-
- s = g->m->scalararg[0];
- g->m->scalararg[0] = 0;
- runtime·sigdisable(s);
-}
diff --git a/src/runtime/signal1_unix.go b/src/runtime/signal1_unix.go
new file mode 100644
index 000000000..25f01e056
--- /dev/null
+++ b/src/runtime/signal1_unix.go
@@ -0,0 +1,111 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package runtime
+
+const (
+ _SIG_DFL uintptr = 0
+ _SIG_IGN uintptr = 1
+)
+
+func initsig() {
+ // _NSIG is the number of signals on this operating system.
+ // sigtable should describe what to do for all the possible signals.
+ if len(sigtable) != _NSIG {
+ print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
+ gothrow("initsig")
+ }
+
+ // First call: basic setup.
+ for i := int32(0); i < _NSIG; i++ {
+ t := &sigtable[i]
+ if t.flags == 0 || t.flags&_SigDefault != 0 {
+ continue
+ }
+
+ // For some signals, we respect an inherited SIG_IGN handler
+ // rather than insist on installing our own default handler.
+ // Even these signals can be fetched using the os/signal package.
+ switch i {
+ case _SIGHUP, _SIGINT:
+ if getsig(i) == _SIG_IGN {
+ t.flags = _SigNotify | _SigIgnored
+ continue
+ }
+ }
+
+ t.flags |= _SigHandling
+ setsig(i, funcPC(sighandler), true)
+ }
+}
+
+func sigenable(sig uint32) {
+ if sig >= uint32(len(sigtable)) {
+ return
+ }
+
+ t := &sigtable[sig]
+ if t.flags&_SigNotify != 0 && t.flags&_SigHandling == 0 {
+ t.flags |= _SigHandling
+ if getsig(int32(sig)) == _SIG_IGN {
+ t.flags |= _SigIgnored
+ }
+ setsig(int32(sig), funcPC(sighandler), true)
+ }
+}
+
+func sigdisable(sig uint32) {
+ if sig >= uint32(len(sigtable)) {
+ return
+ }
+
+ t := &sigtable[sig]
+ if t.flags&_SigNotify != 0 && t.flags&_SigHandling != 0 {
+ t.flags &^= _SigHandling
+ if t.flags&_SigIgnored != 0 {
+ setsig(int32(sig), _SIG_IGN, true)
+ } else {
+ setsig(int32(sig), _SIG_DFL, true)
+ }
+ }
+}
+
+func resetcpuprofiler(hz int32) {
+ var it itimerval
+ if hz == 0 {
+ setitimer(_ITIMER_PROF, &it, nil)
+ } else {
+ it.it_interval.tv_sec = 0
+ it.it_interval.set_usec(1000000 / hz)
+ it.it_value = it.it_interval
+ setitimer(_ITIMER_PROF, &it, nil)
+ }
+ _g_ := getg()
+ _g_.m.profilehz = hz
+}
+
+func sigpipe() {
+ setsig(_SIGPIPE, _SIG_DFL, false)
+ raise(_SIGPIPE)
+}
+
+func crash() {
+ if GOOS == "darwin" {
+ // OS X core dumps are linear dumps of the mapped memory,
+ // from the first virtual byte to the last, with zeros in the gaps.
+ // Because of the way we arrange the address space on 64-bit systems,
+ // this means the OS X core file will be >128 GB and even on a zippy
+ // workstation can take OS X well over an hour to write (uninterruptible).
+ // Save users from making that mistake.
+ if ptrSize == 8 {
+ return
+ }
+ }
+
+ unblocksignals()
+ setsig(_SIGABRT, _SIG_DFL, false)
+ raise(_SIGABRT)
+}
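
Editor's note: initsig, sigenable, and sigdisable above all work by testing and toggling the flag bits stored in sigtable (_SigNotify, _SigHandling, _SigIgnored, ...). The sketch below replays that bit manipulation on a single flags word; the constant values are copied from runtime2.go and the code is an illustration of the pattern, not runtime code.

package main

import "fmt"

// Flag values copied from runtime2.go, for illustration only.
const (
	_SigNotify   = 1 << 0
	_SigKill     = 1 << 1
	_SigThrow    = 1 << 2
	_SigHandling = 1 << 5
	_SigIgnored  = 1 << 6
)

func main() {
	flags := int32(_SigNotify + _SigKill) // e.g. a SIGHUP-style table entry

	// sigenable: install a handler only for _SigNotify signals not already handled.
	if flags&_SigNotify != 0 && flags&_SigHandling == 0 {
		flags |= _SigHandling
		fmt.Printf("handler installed, flags=%#x\n", flags)
	}

	// sigdisable: restore SIG_IGN if the signal was originally ignored, else SIG_DFL.
	if flags&_SigNotify != 0 && flags&_SigHandling != 0 {
		flags &^= _SigHandling
		if flags&_SigIgnored != 0 {
			fmt.Println("restore SIG_IGN")
		} else {
			fmt.Println("restore SIG_DFL")
		}
	}
}
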
diff --git a/src/runtime/signal_386.c b/src/runtime/signal_386.c
deleted file mode 100644
index 30a7488bd..000000000
--- a/src/runtime/signal_386.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_GOOS_GOARCH.h"
-#include "signals_GOOS.h"
-
-void
-runtime·dumpregs(Siginfo *info, void *ctxt)
-{
- USED(info);
- USED(ctxt);
-
- runtime·printf("eax %x\n", SIG_EAX(info, ctxt));
- runtime·printf("ebx %x\n", SIG_EBX(info, ctxt));
- runtime·printf("ecx %x\n", SIG_ECX(info, ctxt));
- runtime·printf("edx %x\n", SIG_EDX(info, ctxt));
- runtime·printf("edi %x\n", SIG_EDI(info, ctxt));
- runtime·printf("esi %x\n", SIG_ESI(info, ctxt));
- runtime·printf("ebp %x\n", SIG_EBP(info, ctxt));
- runtime·printf("esp %x\n", SIG_ESP(info, ctxt));
- runtime·printf("eip %x\n", SIG_EIP(info, ctxt));
- runtime·printf("eflags %x\n", SIG_EFLAGS(info, ctxt));
- runtime·printf("cs %x\n", SIG_CS(info, ctxt));
- runtime·printf("fs %x\n", SIG_FS(info, ctxt));
- runtime·printf("gs %x\n", SIG_GS(info, ctxt));
-}
-
-void
-runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
-{
- uintptr *sp;
- SigTab *t;
- bool crash;
-
- if(sig == SIGPROF) {
- runtime·sigprof((byte*)SIG_EIP(info, ctxt), (byte*)SIG_ESP(info, ctxt), nil, gp, g->m);
- return;
- }
-
- t = &runtime·sigtab[sig];
- if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
- // Make it look like a call to the signal func.
- // Have to pass arguments out of band since
- // augmenting the stack frame would break
- // the unwinding code.
- gp->sig = sig;
- gp->sigcode0 = SIG_CODE0(info, ctxt);
- gp->sigcode1 = SIG_CODE1(info, ctxt);
- gp->sigpc = SIG_EIP(info, ctxt);
-
-#ifdef GOOS_darwin
- // Work around Leopard bug that doesn't set FPE_INTDIV.
- // Look at instruction to see if it is a divide.
- // Not necessary in Snow Leopard (si_code will be != 0).
- if(sig == SIGFPE && gp->sigcode0 == 0) {
- byte *pc;
- pc = (byte*)gp->sigpc;
- if(pc[0] == 0x66) // 16-bit instruction prefix
- pc++;
- if(pc[0] == 0xF6 || pc[0] == 0xF7)
- gp->sigcode0 = FPE_INTDIV;
- }
-#endif
-
- // Only push runtime·sigpanic if eip != 0.
- // If eip == 0, probably panicked because of a
- // call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to runtime·sigpanic instead.
- // (Otherwise the trace will end at runtime·sigpanic and we
- // won't get to see who faulted.)
- if(SIG_EIP(info, ctxt) != 0) {
- sp = (uintptr*)SIG_ESP(info, ctxt);
- *--sp = SIG_EIP(info, ctxt);
- SIG_ESP(info, ctxt) = (uintptr)sp;
- }
- SIG_EIP(info, ctxt) = (uintptr)runtime·sigpanic;
- return;
- }
-
- if(SIG_CODE0(info, ctxt) == SI_USER || (t->flags & SigNotify))
- if(runtime·sigsend(sig))
- return;
- if(t->flags & SigKill)
- runtime·exit(2);
- if(!(t->flags & SigThrow))
- return;
-
- g->m->throwing = 1;
- g->m->caughtsig = gp;
- runtime·startpanic();
-
- if(sig < 0 || sig >= NSIG)
- runtime·printf("Signal %d\n", sig);
- else
- runtime·printf("%s\n", runtime·sigtab[sig].name);
-
- runtime·printf("PC=%x\n", SIG_EIP(info, ctxt));
- if(g->m->lockedg != nil && g->m->ncgo > 0 && gp == g->m->g0) {
- runtime·printf("signal arrived during cgo execution\n");
- gp = g->m->lockedg;
- }
- runtime·printf("\n");
-
- if(runtime·gotraceback(&crash)){
- runtime·goroutineheader(gp);
- runtime·tracebacktrap(SIG_EIP(info, ctxt), SIG_ESP(info, ctxt), 0, gp);
- runtime·tracebackothers(gp);
- runtime·printf("\n");
- runtime·dumpregs(info, ctxt);
- }
-
- if(crash)
- runtime·crash();
-
- runtime·exit(2);
-}
diff --git a/src/runtime/signal_386.go b/src/runtime/signal_386.go
new file mode 100644
index 000000000..5336a4330
--- /dev/null
+++ b/src/runtime/signal_386.go
@@ -0,0 +1,131 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd
+
+package runtime
+
+import "unsafe"
+
+func dumpregs(c *sigctxt) {
+ print("eax ", hex(c.eax()), "\n")
+ print("ebx ", hex(c.ebx()), "\n")
+ print("ecx ", hex(c.ecx()), "\n")
+ print("edx ", hex(c.edx()), "\n")
+ print("edi ", hex(c.edi()), "\n")
+ print("esi ", hex(c.esi()), "\n")
+ print("ebp ", hex(c.ebp()), "\n")
+ print("esp ", hex(c.esp()), "\n")
+ print("eip ", hex(c.eip()), "\n")
+ print("eflags ", hex(c.eflags()), "\n")
+ print("cs ", hex(c.cs()), "\n")
+ print("fs ", hex(c.fs()), "\n")
+ print("gs ", hex(c.gs()), "\n")
+}
+
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+
+ if sig == _SIGPROF {
+ sigprof((*byte)(unsafe.Pointer(uintptr(c.eip()))), (*byte)(unsafe.Pointer(uintptr(c.esp()))), nil, gp, _g_.m)
+ return
+ }
+
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.sigaddr())
+ gp.sigpc = uintptr(c.eip())
+
+ if GOOS == "darwin" {
+ // Work around Leopard bug that doesn't set FPE_INTDIV.
+ // Look at instruction to see if it is a divide.
+ // Not necessary in Snow Leopard (si_code will be != 0).
+ if sig == _SIGFPE && gp.sigcode0 == 0 {
+ pc := (*[4]byte)(unsafe.Pointer(gp.sigpc))
+ i := 0
+ if pc[i] == 0x66 { // 16-bit instruction prefix
+ i++
+ }
+ if pc[i] == 0xF6 || pc[i] == 0xF7 {
+ gp.sigcode0 = _FPE_INTDIV
+ }
+ }
+ }
+
+ // Only push runtime.sigpanic if rip != 0.
+ // If rip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime.sigpanic instead.
+ // (Otherwise the trace will end at runtime.sigpanic and we
+ // won't get to see who faulted.)
+ if c.eip() != 0 {
+ sp := c.esp()
+ if regSize > ptrSize {
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(uintptr(sp))) = 0
+ }
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(uintptr(sp))) = uintptr(c.eip())
+ c.set_esp(sp)
+ }
+ c.set_eip(uint32(funcPC(sigpanic)))
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if flags&_SigKill != 0 {
+ exit(2)
+ }
+
+ if flags&_SigThrow == 0 {
+ return
+ }
+
+ _g_.m.throwing = 1
+ _g_.m.caughtsig = gp
+ startpanic()
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.eip()), "\n")
+ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+ print("signal arrived during cgo execution\n")
+ gp = _g_.m.lockedg
+ }
+ print("\n")
+
+ var docrash bool
+ if gotraceback(&docrash) > 0 {
+ goroutineheader(gp)
+ tracebacktrap(uintptr(c.eip()), uintptr(c.esp()), 0, gp)
+ tracebackothers(gp)
+ print("\n")
+ dumpregs(c)
+ }
+
+ if docrash {
+ crash()
+ }
+
+ exit(2)
+}
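
Editor's note: the panic path in the 386 sighandler above rewrites the saved context so the faulting goroutine appears to have called sigpanic: it pushes the faulting EIP onto the signal-time stack as a return address and points EIP at sigpanic, so the unwinder sees an ordinary call frame. The sketch below simulates that push on a plain slice standing in for the stack; the PC values are made up and this is an illustration of the pointer arithmetic only, not runtime code.

package main

import "fmt"

func main() {
	// A toy downward-growing stack: sp indexes the next free slot from the top.
	stack := make([]uintptr, 8)
	sp := len(stack) // "stack pointer" (grows toward index 0)

	faultingPC := uintptr(0x08049a10) // hypothetical EIP at the time of the fault
	sigpanicPC := uintptr(0x080501c0) // hypothetical address of sigpanic

	// Mirror of the handler: push the old PC so it becomes the return address...
	sp--
	stack[sp] = faultingPC
	// ...then resume execution at sigpanic instead of the faulting instruction.
	pc := sigpanicPC

	fmt.Printf("resume at %#x, return address on stack: %#x\n", pc, stack[sp])
}
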
diff --git a/src/runtime/signal_amd64x.c b/src/runtime/signal_amd64x.c
deleted file mode 100644
index feb4afcce..000000000
--- a/src/runtime/signal_amd64x.c
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64 amd64p32
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_GOOS_GOARCH.h"
-#include "signals_GOOS.h"
-
-void
-runtime·dumpregs(Siginfo *info, void *ctxt)
-{
- USED(info);
- USED(ctxt);
-
- runtime·printf("rax %X\n", SIG_RAX(info, ctxt));
- runtime·printf("rbx %X\n", SIG_RBX(info, ctxt));
- runtime·printf("rcx %X\n", SIG_RCX(info, ctxt));
- runtime·printf("rdx %X\n", SIG_RDX(info, ctxt));
- runtime·printf("rdi %X\n", SIG_RDI(info, ctxt));
- runtime·printf("rsi %X\n", SIG_RSI(info, ctxt));
- runtime·printf("rbp %X\n", SIG_RBP(info, ctxt));
- runtime·printf("rsp %X\n", SIG_RSP(info, ctxt));
- runtime·printf("r8 %X\n", SIG_R8(info, ctxt) );
- runtime·printf("r9 %X\n", SIG_R9(info, ctxt) );
- runtime·printf("r10 %X\n", SIG_R10(info, ctxt));
- runtime·printf("r11 %X\n", SIG_R11(info, ctxt));
- runtime·printf("r12 %X\n", SIG_R12(info, ctxt));
- runtime·printf("r13 %X\n", SIG_R13(info, ctxt));
- runtime·printf("r14 %X\n", SIG_R14(info, ctxt));
- runtime·printf("r15 %X\n", SIG_R15(info, ctxt));
- runtime·printf("rip %X\n", SIG_RIP(info, ctxt));
- runtime·printf("rflags %X\n", SIG_RFLAGS(info, ctxt));
- runtime·printf("cs %X\n", SIG_CS(info, ctxt));
- runtime·printf("fs %X\n", SIG_FS(info, ctxt));
- runtime·printf("gs %X\n", SIG_GS(info, ctxt));
-}
-
-void
-runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
-{
- uintptr *sp;
- SigTab *t;
- bool crash;
-
- if(sig == SIGPROF) {
- runtime·sigprof((byte*)SIG_RIP(info, ctxt), (byte*)SIG_RSP(info, ctxt), nil, gp, g->m);
- return;
- }
-
-#ifdef GOOS_darwin
- // x86-64 has 48-bit virtual addresses. The top 16 bits must echo bit 47.
- // The hardware delivers a different kind of fault for a malformed address
- // than it does for an attempt to access a valid but unmapped address.
- // OS X 10.9.2 mishandles the malformed address case, making it look like
- // a user-generated signal (like someone ran kill -SEGV ourpid).
- // We pass user-generated signals to os/signal, or else ignore them.
- // Doing that here - and returning to the faulting code - results in an
- // infinite loop. It appears the best we can do is rewrite what the kernel
- // delivers into something more like the truth. The address used below
- // has very little chance of being the one that caused the fault, but it is
- // malformed, it is clearly not a real pointer, and if it does get printed
- // in real life, people will probably search for it and find this code.
- // There are no Google hits for b01dfacedebac1e or 0xb01dfacedebac1e
- // as I type this comment.
- if(sig == SIGSEGV && SIG_CODE0(info, ctxt) == SI_USER) {
- SIG_CODE0(info, ctxt) = SI_USER+1;
- info->si_addr = (void*)(uintptr)0xb01dfacedebac1eULL;
- }
-#endif
-
- t = &runtime·sigtab[sig];
- if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
- // Make it look like a call to the signal func.
- // Have to pass arguments out of band since
- // augmenting the stack frame would break
- // the unwinding code.
- gp->sig = sig;
- gp->sigcode0 = SIG_CODE0(info, ctxt);
- gp->sigcode1 = SIG_CODE1(info, ctxt);
- gp->sigpc = SIG_RIP(info, ctxt);
-
-#ifdef GOOS_darwin
- // Work around Leopard bug that doesn't set FPE_INTDIV.
- // Look at instruction to see if it is a divide.
- // Not necessary in Snow Leopard (si_code will be != 0).
- if(sig == SIGFPE && gp->sigcode0 == 0) {
- byte *pc;
- pc = (byte*)gp->sigpc;
- if((pc[0]&0xF0) == 0x40) // 64-bit REX prefix
- pc++;
- else if(pc[0] == 0x66) // 16-bit instruction prefix
- pc++;
- if(pc[0] == 0xF6 || pc[0] == 0xF7)
- gp->sigcode0 = FPE_INTDIV;
- }
-#endif
-
- // Only push runtime·sigpanic if rip != 0.
- // If rip == 0, probably panicked because of a
- // call to a nil func. Not pushing that onto sp will
- // make the trace look like a call to runtime·sigpanic instead.
- // (Otherwise the trace will end at runtime·sigpanic and we
- // won't get to see who faulted.)
- if(SIG_RIP(info, ctxt) != 0) {
- sp = (uintptr*)SIG_RSP(info, ctxt);
- if(sizeof(uintreg) > sizeof(uintptr))
- *--sp = 0;
- *--sp = SIG_RIP(info, ctxt);
- SIG_RSP(info, ctxt) = (uintptr)sp;
- }
- SIG_RIP(info, ctxt) = (uintptr)runtime·sigpanic;
- return;
- }
-
- if(SIG_CODE0(info, ctxt) == SI_USER || (t->flags & SigNotify))
- if(runtime·sigsend(sig))
- return;
- if(t->flags & SigKill)
- runtime·exit(2);
- if(!(t->flags & SigThrow))
- return;
-
- g->m->throwing = 1;
- g->m->caughtsig = gp;
- runtime·startpanic();
-
- if(sig < 0 || sig >= NSIG)
- runtime·printf("Signal %d\n", sig);
- else
- runtime·printf("%s\n", runtime·sigtab[sig].name);
-
- runtime·printf("PC=%X\n", SIG_RIP(info, ctxt));
- if(g->m->lockedg != nil && g->m->ncgo > 0 && gp == g->m->g0) {
- runtime·printf("signal arrived during cgo execution\n");
- gp = g->m->lockedg;
- }
- runtime·printf("\n");
-
- if(runtime·gotraceback(&crash)){
- runtime·goroutineheader(gp);
- runtime·tracebacktrap(SIG_RIP(info, ctxt), SIG_RSP(info, ctxt), 0, gp);
- runtime·tracebackothers(gp);
- runtime·printf("\n");
- runtime·dumpregs(info, ctxt);
- }
-
- if(crash)
- runtime·crash();
-
- runtime·exit(2);
-}
diff --git a/src/runtime/signal_amd64x.go b/src/runtime/signal_amd64x.go
new file mode 100644
index 000000000..de88d93a5
--- /dev/null
+++ b/src/runtime/signal_amd64x.go
@@ -0,0 +1,163 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 amd64p32
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
+
+package runtime
+
+import "unsafe"
+
+func dumpregs(c *sigctxt) {
+ print("rax ", hex(c.rax()), "\n")
+ print("rbx ", hex(c.rbx()), "\n")
+ print("rcx ", hex(c.rcx()), "\n")
+ print("rdx ", hex(c.rdx()), "\n")
+ print("rdi ", hex(c.rdi()), "\n")
+ print("rsi ", hex(c.rsi()), "\n")
+ print("rbp ", hex(c.rbp()), "\n")
+ print("rsp ", hex(c.rsp()), "\n")
+ print("r8 ", hex(c.r8()), "\n")
+ print("r9 ", hex(c.r9()), "\n")
+ print("r10 ", hex(c.r10()), "\n")
+ print("r11 ", hex(c.r11()), "\n")
+ print("r12 ", hex(c.r12()), "\n")
+ print("r13 ", hex(c.r13()), "\n")
+ print("r14 ", hex(c.r14()), "\n")
+ print("r15 ", hex(c.r15()), "\n")
+ print("rip ", hex(c.rip()), "\n")
+ print("rflags ", hex(c.rflags()), "\n")
+ print("cs ", hex(c.cs()), "\n")
+ print("fs ", hex(c.fs()), "\n")
+ print("gs ", hex(c.gs()), "\n")
+}
+
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+
+ if sig == _SIGPROF {
+ sigprof((*byte)(unsafe.Pointer(uintptr(c.rip()))), (*byte)(unsafe.Pointer(uintptr(c.rsp()))), nil, gp, _g_.m)
+ return
+ }
+
+ if GOOS == "darwin" {
+ // x86-64 has 48-bit virtual addresses. The top 16 bits must echo bit 47.
+ // The hardware delivers a different kind of fault for a malformed address
+ // than it does for an attempt to access a valid but unmapped address.
+ // OS X 10.9.2 mishandles the malformed address case, making it look like
+ // a user-generated signal (like someone ran kill -SEGV ourpid).
+ // We pass user-generated signals to os/signal, or else ignore them.
+ // Doing that here - and returning to the faulting code - results in an
+ // infinite loop. It appears the best we can do is rewrite what the kernel
+ // delivers into something more like the truth. The address used below
+ // has very little chance of being the one that caused the fault, but it is
+ // malformed, it is clearly not a real pointer, and if it does get printed
+ // in real life, people will probably search for it and find this code.
+ // There are no Google hits for b01dfacedebac1e or 0xb01dfacedebac1e
+ // as I type this comment.
+ if sig == _SIGSEGV && c.sigcode() == _SI_USER {
+ c.set_sigcode(_SI_USER + 1)
+ c.set_sigaddr(0xb01dfacedebac1e)
+ }
+ }
+
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.sigaddr())
+ gp.sigpc = uintptr(c.rip())
+
+ if GOOS == "darwin" {
+ // Work around Leopard bug that doesn't set FPE_INTDIV.
+ // Look at instruction to see if it is a divide.
+ // Not necessary in Snow Leopard (si_code will be != 0).
+ if sig == _SIGFPE && gp.sigcode0 == 0 {
+ pc := (*[4]byte)(unsafe.Pointer(gp.sigpc))
+ i := 0
+ if pc[i]&0xF0 == 0x40 { // 64-bit REX prefix
+ i++
+ } else if pc[i] == 0x66 { // 16-bit instruction prefix
+ i++
+ }
+ if pc[i] == 0xF6 || pc[i] == 0xF7 {
+ gp.sigcode0 = _FPE_INTDIV
+ }
+ }
+ }
+
+ // Only push runtime.sigpanic if rip != 0.
+ // If rip == 0, probably panicked because of a
+ // call to a nil func. Not pushing that onto sp will
+ // make the trace look like a call to runtime.sigpanic instead.
+ // (Otherwise the trace will end at runtime.sigpanic and we
+ // won't get to see who faulted.)
+ if c.rip() != 0 {
+ sp := c.rsp()
+ if regSize > ptrSize {
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(uintptr(sp))) = 0
+ }
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(uintptr(sp))) = uintptr(c.rip())
+ c.set_rsp(sp)
+ }
+ c.set_rip(uint64(funcPC(sigpanic)))
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if flags&_SigKill != 0 {
+ exit(2)
+ }
+
+ if flags&_SigThrow == 0 {
+ return
+ }
+
+ _g_.m.throwing = 1
+ _g_.m.caughtsig = gp
+ startpanic()
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.rip()), "\n")
+ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+ print("signal arrived during cgo execution\n")
+ gp = _g_.m.lockedg
+ }
+ print("\n")
+
+ var docrash bool
+ if gotraceback(&docrash) > 0 {
+ goroutineheader(gp)
+ tracebacktrap(uintptr(c.rip()), uintptr(c.rsp()), 0, gp)
+ tracebackothers(gp)
+ print("\n")
+ dumpregs(c)
+ }
+
+ if docrash {
+ crash()
+ }
+
+ exit(2)
+}
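
Editor's note: the Darwin SIGFPE workaround in the amd64 handler above inspects the faulting instruction to decide whether a fault with si_code 0 was really an integer divide: it skips an optional REX (0x40-0x4F) or 16-bit operand-size (0x66) prefix and then checks for the F6/F7 opcode group used by div/idiv. A standalone sketch of that byte test follows; isIntDiv is a hypothetical name and this is not runtime code.

package main

import "fmt"

// isIntDiv reports whether the instruction bytes look like an x86 integer divide,
// using the same prefix-skipping logic as the darwin SIGFPE workaround above.
func isIntDiv(pc []byte) bool {
	i := 0
	if pc[i]&0xF0 == 0x40 { // 64-bit REX prefix
		i++
	} else if pc[i] == 0x66 { // 16-bit operand-size prefix
		i++
	}
	return pc[i] == 0xF6 || pc[i] == 0xF7
}

func main() {
	fmt.Println(isIntDiv([]byte{0x48, 0xF7, 0xF3})) // REX prefix + F7 (div/idiv group) -> true
	fmt.Println(isIntDiv([]byte{0x66, 0xF7, 0xF9})) // operand-size prefix + F7 -> true
	fmt.Println(isIntDiv([]byte{0x89, 0xC3}))       // unrelated mov -> false
}
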
diff --git a/src/runtime/signal_arm.c b/src/runtime/signal_arm.c
deleted file mode 100644
index afad5e7d1..000000000
--- a/src/runtime/signal_arm.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_GOOS_GOARCH.h"
-#include "signals_GOOS.h"
-
-void
-runtime·dumpregs(Siginfo *info, void *ctxt)
-{
- USED(info);
- USED(ctxt);
-
- runtime·printf("trap %x\n", SIG_TRAP(info, ctxt));
- runtime·printf("error %x\n", SIG_ERROR(info, ctxt));
- runtime·printf("oldmask %x\n", SIG_OLDMASK(info, ctxt));
- runtime·printf("r0 %x\n", SIG_R0(info, ctxt));
- runtime·printf("r1 %x\n", SIG_R1(info, ctxt));
- runtime·printf("r2 %x\n", SIG_R2(info, ctxt));
- runtime·printf("r3 %x\n", SIG_R3(info, ctxt));
- runtime·printf("r4 %x\n", SIG_R4(info, ctxt));
- runtime·printf("r5 %x\n", SIG_R5(info, ctxt));
- runtime·printf("r6 %x\n", SIG_R6(info, ctxt));
- runtime·printf("r7 %x\n", SIG_R7(info, ctxt));
- runtime·printf("r8 %x\n", SIG_R8(info, ctxt));
- runtime·printf("r9 %x\n", SIG_R9(info, ctxt));
- runtime·printf("r10 %x\n", SIG_R10(info, ctxt));
- runtime·printf("fp %x\n", SIG_FP(info, ctxt));
- runtime·printf("ip %x\n", SIG_IP(info, ctxt));
- runtime·printf("sp %x\n", SIG_SP(info, ctxt));
- runtime·printf("lr %x\n", SIG_LR(info, ctxt));
- runtime·printf("pc %x\n", SIG_PC(info, ctxt));
- runtime·printf("cpsr %x\n", SIG_CPSR(info, ctxt));
- runtime·printf("fault %x\n", SIG_FAULT(info, ctxt));
-}
-
-void
-runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
-{
- SigTab *t;
- bool crash;
-
- if(sig == SIGPROF) {
- runtime·sigprof((uint8*)SIG_PC(info, ctxt), (uint8*)SIG_SP(info, ctxt), (uint8*)SIG_LR(info, ctxt), gp, g->m);
- return;
- }
-
- t = &runtime·sigtab[sig];
- if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
- // Make it look like a call to the signal func.
- // Have to pass arguments out of band since
- // augmenting the stack frame would break
- // the unwinding code.
- gp->sig = sig;
- gp->sigcode0 = SIG_CODE0(info, ctxt);
- gp->sigcode1 = SIG_FAULT(info, ctxt);
- gp->sigpc = SIG_PC(info, ctxt);
-
- // We arrange lr, and pc to pretend the panicking
- // function calls sigpanic directly.
- // Always save LR to stack so that panics in leaf
- // functions are correctly handled. This smashes
- // the stack frame but we're not going back there
- // anyway.
- SIG_SP(info, ctxt) -= 4;
- *(uint32*)SIG_SP(info, ctxt) = SIG_LR(info, ctxt);
- // Don't bother saving PC if it's zero, which is
- // probably a call to a nil func: the old link register
- // is more useful in the stack trace.
- if(gp->sigpc != 0)
- SIG_LR(info, ctxt) = gp->sigpc;
- // In case we are panicking from external C code
- SIG_R10(info, ctxt) = (uintptr)gp;
- SIG_PC(info, ctxt) = (uintptr)runtime·sigpanic;
- return;
- }
-
- if(SIG_CODE0(info, ctxt) == SI_USER || (t->flags & SigNotify))
- if(runtime·sigsend(sig))
- return;
- if(t->flags & SigKill)
- runtime·exit(2);
- if(!(t->flags & SigThrow))
- return;
-
- g->m->throwing = 1;
- g->m->caughtsig = gp;
- if(runtime·panicking) // traceback already printed
- runtime·exit(2);
- runtime·panicking = 1;
-
- if(sig < 0 || sig >= NSIG)
- runtime·printf("Signal %d\n", sig);
- else
- runtime·printf("%s\n", runtime·sigtab[sig].name);
-
- runtime·printf("PC=%x\n", SIG_PC(info, ctxt));
- if(g->m->lockedg != nil && g->m->ncgo > 0 && gp == g->m->g0) {
- runtime·printf("signal arrived during cgo execution\n");
- gp = g->m->lockedg;
- }
- runtime·printf("\n");
-
- if(runtime·gotraceback(&crash)){
- runtime·goroutineheader(gp);
- runtime·tracebacktrap(SIG_PC(info, ctxt), SIG_SP(info, ctxt), SIG_LR(info, ctxt), gp);
- runtime·tracebackothers(gp);
- runtime·printf("\n");
- runtime·dumpregs(info, ctxt);
- }
-
- if(crash)
- runtime·crash();
-
- runtime·exit(2);
-}
diff --git a/src/runtime/signal_arm.go b/src/runtime/signal_arm.go
new file mode 100644
index 000000000..d224ce645
--- /dev/null
+++ b/src/runtime/signal_arm.go
@@ -0,0 +1,126 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd
+
+package runtime
+
+import "unsafe"
+
+func dumpregs(c *sigctxt) {
+ print("trap ", hex(c.trap()), "\n")
+ print("error ", hex(c.error()), "\n")
+ print("oldmask ", hex(c.oldmask()), "\n")
+ print("r0 ", hex(c.r0()), "\n")
+ print("r1 ", hex(c.r1()), "\n")
+ print("r2 ", hex(c.r2()), "\n")
+ print("r3 ", hex(c.r3()), "\n")
+ print("r4 ", hex(c.r4()), "\n")
+ print("r5 ", hex(c.r5()), "\n")
+ print("r6 ", hex(c.r6()), "\n")
+ print("r7 ", hex(c.r7()), "\n")
+ print("r8 ", hex(c.r8()), "\n")
+ print("r9 ", hex(c.r9()), "\n")
+ print("r10 ", hex(c.r10()), "\n")
+ print("fp ", hex(c.fp()), "\n")
+ print("ip ", hex(c.ip()), "\n")
+ print("sp ", hex(c.sp()), "\n")
+ print("lr ", hex(c.lr()), "\n")
+ print("pc ", hex(c.pc()), "\n")
+ print("cpsr ", hex(c.cpsr()), "\n")
+ print("fault ", hex(c.fault()), "\n")
+}
+
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+
+ if sig == _SIGPROF {
+ sigprof((*byte)(unsafe.Pointer(uintptr(c.pc()))), (*byte)(unsafe.Pointer(uintptr(c.sp()))), (*byte)(unsafe.Pointer(uintptr(c.lr()))), gp, _g_.m)
+ return
+ }
+
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.fault())
+ gp.sigpc = uintptr(c.pc())
+
+ // We arrange lr and pc to pretend the panicking
+ // function calls sigpanic directly.
+ // Always save LR to stack so that panics in leaf
+ // functions are correctly handled. This smashes
+ // the stack frame but we're not going back there
+ // anyway.
+ sp := c.sp() - 4
+ c.set_sp(sp)
+ *(*uint32)(unsafe.Pointer(uintptr(sp))) = c.lr()
+
+ // Don't bother saving PC if it's zero, which is
+ // probably a call to a nil func: the old link register
+ // is more useful in the stack trace.
+ if gp.sigpc != 0 {
+ c.set_lr(uint32(gp.sigpc))
+ }
+
+ // In case we are panicking from external C code
+ c.set_r10(uint32(uintptr(unsafe.Pointer(gp))))
+ c.set_pc(uint32(funcPC(sigpanic)))
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if flags&_SigKill != 0 {
+ exit(2)
+ }
+
+ if flags&_SigThrow == 0 {
+ return
+ }
+
+ _g_.m.throwing = 1
+ _g_.m.caughtsig = gp
+ startpanic()
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.pc()), "\n")
+ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+ print("signal arrived during cgo execution\n")
+ gp = _g_.m.lockedg
+ }
+ print("\n")
+
+ var docrash bool
+ if gotraceback(&docrash) > 0 {
+ goroutineheader(gp)
+ tracebacktrap(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.lr()), gp)
+ tracebackothers(gp)
+ print("\n")
+ dumpregs(c)
+ }
+
+ if docrash {
+ crash()
+ }
+
+ exit(2)
+}
diff --git a/src/runtime/signal_darwin.go b/src/runtime/signal_darwin.go
new file mode 100644
index 000000000..122648bc3
--- /dev/null
+++ b/src/runtime/signal_darwin.go
@@ -0,0 +1,45 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigThrow, "SIGEMT: emulate instruction executed"},
+ /* 8 */ {_SigPanic, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigPanic, "SIGBUS: bus error"},
+ /* 11 */ {_SigPanic, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigThrow, "SIGSYS: bad system call"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 17 */ {0, "SIGSTOP: stop"},
+ /* 18 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 19 */ {0, "SIGCONT: continue after stop"},
+ /* 20 */ {_SigNotify, "SIGCHLD: child status has changed"},
+ /* 21 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 22 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 23 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 24 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 25 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 26 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 27 */ {_SigNotify, "SIGPROF: profiling alarm clock"},
+ /* 28 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 29 */ {_SigNotify, "SIGINFO: status request from keyboard"},
+ /* 30 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 31 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+}
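
Editor's note: the per-OS sigtable above drives both initsig and the sighandlers; signal numbers outside the table fall back to _SigThrow, which is why each handler starts from flags := _SigThrow before the bounds check. A small sketch of that lookup pattern with a two-entry toy table follows; lookup and the truncated table are illustrative only, not runtime code.

package main

import "fmt"

// Flag values copied from runtime2.go, for illustration only.
const (
	_SigNotify = 1 << 0
	_SigKill   = 1 << 1
	_SigThrow  = 1 << 2
)

type sigTabT struct {
	flags int32
	name  string
}

// A toy two-entry table in the shape of the darwin sigtable above.
var sigtable = [...]sigTabT{
	{0, "SIGNONE: no trap"},
	{_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
}

// lookup mirrors the pattern in sighandler: unknown signals are treated as _SigThrow.
func lookup(sig uint32) (int32, string) {
	flags, name := int32(_SigThrow), "unknown signal"
	if sig < uint32(len(sigtable)) {
		flags, name = sigtable[sig].flags, sigtable[sig].name
	}
	return flags, name
}

func main() {
	for _, sig := range []uint32{1, 99} {
		flags, name := lookup(sig)
		fmt.Printf("sig %d: flags=%#x name=%q\n", sig, flags, name)
	}
}
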
diff --git a/src/runtime/signal_darwin_386.go b/src/runtime/signal_darwin_386.go
new file mode 100644
index 000000000..ccf30ef31
--- /dev/null
+++ b/src/runtime/signal_darwin_386.go
@@ -0,0 +1,34 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *regs32 { return &(*ucontext)(c.ctxt).uc_mcontext.ss }
+func (c *sigctxt) eax() uint32 { return c.regs().eax }
+func (c *sigctxt) ebx() uint32 { return c.regs().ebx }
+func (c *sigctxt) ecx() uint32 { return c.regs().ecx }
+func (c *sigctxt) edx() uint32 { return c.regs().edx }
+func (c *sigctxt) edi() uint32 { return c.regs().edi }
+func (c *sigctxt) esi() uint32 { return c.regs().esi }
+func (c *sigctxt) ebp() uint32 { return c.regs().ebp }
+func (c *sigctxt) esp() uint32 { return c.regs().esp }
+func (c *sigctxt) eip() uint32 { return c.regs().eip }
+func (c *sigctxt) eflags() uint32 { return c.regs().eflags }
+func (c *sigctxt) cs() uint32 { return c.regs().cs }
+func (c *sigctxt) fs() uint32 { return c.regs().fs }
+func (c *sigctxt) gs() uint32 { return c.regs().gs }
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint32 { return uint32(uintptr(unsafe.Pointer(c.info.si_addr))) }
+
+func (c *sigctxt) set_eip(x uint32) { c.regs().eip = x }
+func (c *sigctxt) set_esp(x uint32) { c.regs().esp = x }
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint32) { c.info.si_addr = (*byte)(unsafe.Pointer(uintptr(x))) }
diff --git a/src/runtime/signal_darwin_386.h b/src/runtime/signal_darwin_386.h
deleted file mode 100644
index 5459e10a1..000000000
--- a/src/runtime/signal_darwin_386.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext->ss)
-
-#define SIG_EAX(info, ctxt) (SIG_REGS(ctxt).eax)
-#define SIG_EBX(info, ctxt) (SIG_REGS(ctxt).ebx)
-#define SIG_ECX(info, ctxt) (SIG_REGS(ctxt).ecx)
-#define SIG_EDX(info, ctxt) (SIG_REGS(ctxt).edx)
-#define SIG_EDI(info, ctxt) (SIG_REGS(ctxt).edi)
-#define SIG_ESI(info, ctxt) (SIG_REGS(ctxt).esi)
-#define SIG_EBP(info, ctxt) (SIG_REGS(ctxt).ebp)
-#define SIG_ESP(info, ctxt) (SIG_REGS(ctxt).esp)
-#define SIG_EIP(info, ctxt) (SIG_REGS(ctxt).eip)
-#define SIG_EFLAGS(info, ctxt) (SIG_REGS(ctxt).eflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) ((uintptr)(info)->si_addr)
diff --git a/src/runtime/signal_darwin_amd64.go b/src/runtime/signal_darwin_amd64.go
new file mode 100644
index 000000000..409bc6d57
--- /dev/null
+++ b/src/runtime/signal_darwin_amd64.go
@@ -0,0 +1,42 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *regs64 { return &(*ucontext)(c.ctxt).uc_mcontext.ss }
+func (c *sigctxt) rax() uint64 { return c.regs().rax }
+func (c *sigctxt) rbx() uint64 { return c.regs().rbx }
+func (c *sigctxt) rcx() uint64 { return c.regs().rcx }
+func (c *sigctxt) rdx() uint64 { return c.regs().rdx }
+func (c *sigctxt) rdi() uint64 { return c.regs().rdi }
+func (c *sigctxt) rsi() uint64 { return c.regs().rsi }
+func (c *sigctxt) rbp() uint64 { return c.regs().rbp }
+func (c *sigctxt) rsp() uint64 { return c.regs().rsp }
+func (c *sigctxt) r8() uint64 { return c.regs().r8 }
+func (c *sigctxt) r9() uint64 { return c.regs().r9 }
+func (c *sigctxt) r10() uint64 { return c.regs().r10 }
+func (c *sigctxt) r11() uint64 { return c.regs().r11 }
+func (c *sigctxt) r12() uint64 { return c.regs().r12 }
+func (c *sigctxt) r13() uint64 { return c.regs().r13 }
+func (c *sigctxt) r14() uint64 { return c.regs().r14 }
+func (c *sigctxt) r15() uint64 { return c.regs().r15 }
+func (c *sigctxt) rip() uint64 { return c.regs().rip }
+func (c *sigctxt) rflags() uint64 { return c.regs().rflags }
+func (c *sigctxt) cs() uint64 { return c.regs().cs }
+func (c *sigctxt) fs() uint64 { return c.regs().fs }
+func (c *sigctxt) gs() uint64 { return c.regs().gs }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return uint64(uintptr(unsafe.Pointer(c.info.si_addr))) }
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) { c.info.si_addr = (*byte)(unsafe.Pointer(uintptr(x))) }
diff --git a/src/runtime/signal_darwin_amd64.h b/src/runtime/signal_darwin_amd64.h
deleted file mode 100644
index e3da6de3a..000000000
--- a/src/runtime/signal_darwin_amd64.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext->ss)
-
-#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).rax)
-#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).rbx)
-#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).rcx)
-#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).rdx)
-#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).rdi)
-#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).rsi)
-#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).rbp)
-#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).rsp)
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).r8)
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).r9)
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).r10)
-#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).r11)
-#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).r12)
-#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).r13)
-#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).r14)
-#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).r15)
-#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).rip)
-#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).rflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) ((uintptr)(info)->si_addr)
diff --git a/src/runtime/signal_dragonfly.go b/src/runtime/signal_dragonfly.go
new file mode 100644
index 000000000..d37e11a3e
--- /dev/null
+++ b/src/runtime/signal_dragonfly.go
@@ -0,0 +1,46 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigThrow, "SIGEMT: emulate instruction executed"},
+ /* 8 */ {_SigPanic, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigPanic, "SIGBUS: bus error"},
+ /* 11 */ {_SigPanic, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigThrow, "SIGSYS: bad system call"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 17 */ {0, "SIGSTOP: stop"},
+ /* 18 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 19 */ {0, "SIGCONT: continue after stop"},
+ /* 20 */ {_SigNotify, "SIGCHLD: child status has changed"},
+ /* 21 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 22 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 23 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 24 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 25 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 26 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 27 */ {_SigNotify, "SIGPROF: profiling alarm clock"},
+ /* 28 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 29 */ {_SigNotify, "SIGINFO: status request from keyboard"},
+ /* 30 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 31 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+ /* 32 */ {_SigNotify, "SIGTHR: reserved"},
+}
diff --git a/src/runtime/signal_dragonfly_amd64.go b/src/runtime/signal_dragonfly_amd64.go
new file mode 100644
index 000000000..740959c58
--- /dev/null
+++ b/src/runtime/signal_dragonfly_amd64.go
@@ -0,0 +1,44 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *mcontext {
+ return (*mcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
+}
+func (c *sigctxt) rax() uint64 { return c.regs().mc_rax }
+func (c *sigctxt) rbx() uint64 { return c.regs().mc_rbx }
+func (c *sigctxt) rcx() uint64 { return c.regs().mc_rcx }
+func (c *sigctxt) rdx() uint64 { return c.regs().mc_rdx }
+func (c *sigctxt) rdi() uint64 { return c.regs().mc_rdi }
+func (c *sigctxt) rsi() uint64 { return c.regs().mc_rsi }
+func (c *sigctxt) rbp() uint64 { return c.regs().mc_rbp }
+func (c *sigctxt) rsp() uint64 { return c.regs().mc_rsp }
+func (c *sigctxt) r8() uint64 { return c.regs().mc_r8 }
+func (c *sigctxt) r9() uint64 { return c.regs().mc_r9 }
+func (c *sigctxt) r10() uint64 { return c.regs().mc_r10 }
+func (c *sigctxt) r11() uint64 { return c.regs().mc_r11 }
+func (c *sigctxt) r12() uint64 { return c.regs().mc_r12 }
+func (c *sigctxt) r13() uint64 { return c.regs().mc_r13 }
+func (c *sigctxt) r14() uint64 { return c.regs().mc_r14 }
+func (c *sigctxt) r15() uint64 { return c.regs().mc_r15 }
+func (c *sigctxt) rip() uint64 { return c.regs().mc_rip }
+func (c *sigctxt) rflags() uint64 { return c.regs().mc_rflags }
+func (c *sigctxt) cs() uint64 { return uint64(c.regs().mc_cs) }
+func (c *sigctxt) fs() uint64 { return uint64(c.regs().mc_ss) }
+func (c *sigctxt) gs() uint64 { return uint64(c.regs().mc_ss) }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return uint64(c.info.si_addr) }
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().mc_rip = x }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().mc_rsp = x }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) { c.info.si_addr = x }
diff --git a/src/runtime/signal_dragonfly_amd64.h b/src/runtime/signal_dragonfly_amd64.h
deleted file mode 100644
index 5b4f97782..000000000
--- a/src/runtime/signal_dragonfly_amd64.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext)
-
-#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).mc_rax)
-#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).mc_rbx)
-#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).mc_rcx)
-#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).mc_rdx)
-#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).mc_rdi)
-#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).mc_rsi)
-#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).mc_rbp)
-#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).mc_rsp)
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).mc_r8)
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).mc_r9)
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).mc_r10)
-#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).mc_r11)
-#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).mc_r12)
-#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).mc_r13)
-#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).mc_r14)
-#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).mc_r15)
-#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).mc_rip)
-#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).mc_rflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).mc_cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).mc_ss)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).mc_ss)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) ((uintptr)(info)->si_addr)
diff --git a/src/runtime/signal_freebsd.go b/src/runtime/signal_freebsd.go
new file mode 100644
index 000000000..1dbdb1bd9
--- /dev/null
+++ b/src/runtime/signal_freebsd.go
@@ -0,0 +1,46 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigThrow, "SIGEMT: emulate instruction executed"},
+ /* 8 */ {_SigPanic, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigPanic, "SIGBUS: bus error"},
+ /* 11 */ {_SigPanic, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigNotify, "SIGSYS: bad system call"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 17 */ {0, "SIGSTOP: stop"},
+ /* 18 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 19 */ {0, "SIGCONT: continue after stop"},
+ /* 20 */ {_SigNotify, "SIGCHLD: child status has changed"},
+ /* 21 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 22 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 23 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 24 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 25 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 26 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 27 */ {_SigNotify, "SIGPROF: profiling alarm clock"},
+ /* 28 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 29 */ {_SigNotify, "SIGINFO: status request from keyboard"},
+ /* 30 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 31 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+ /* 32 */ {_SigNotify, "SIGTHR: reserved"},
+}
diff --git a/src/runtime/signal_freebsd_386.go b/src/runtime/signal_freebsd_386.go
new file mode 100644
index 000000000..a0fec1309
--- /dev/null
+++ b/src/runtime/signal_freebsd_386.go
@@ -0,0 +1,34 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *mcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+func (c *sigctxt) eax() uint32 { return c.regs().mc_eax }
+func (c *sigctxt) ebx() uint32 { return c.regs().mc_ebx }
+func (c *sigctxt) ecx() uint32 { return c.regs().mc_ecx }
+func (c *sigctxt) edx() uint32 { return c.regs().mc_edx }
+func (c *sigctxt) edi() uint32 { return c.regs().mc_edi }
+func (c *sigctxt) esi() uint32 { return c.regs().mc_esi }
+func (c *sigctxt) ebp() uint32 { return c.regs().mc_ebp }
+func (c *sigctxt) esp() uint32 { return c.regs().mc_esp }
+func (c *sigctxt) eip() uint32 { return c.regs().mc_eip }
+func (c *sigctxt) eflags() uint32 { return c.regs().mc_eflags }
+func (c *sigctxt) cs() uint32 { return uint32(c.regs().mc_cs) }
+func (c *sigctxt) fs() uint32 { return uint32(c.regs().mc_fs) }
+func (c *sigctxt) gs() uint32 { return uint32(c.regs().mc_gs) }
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint32 { return uint32(c.info.si_addr) }
+
+func (c *sigctxt) set_eip(x uint32) { c.regs().mc_eip = x }
+func (c *sigctxt) set_esp(x uint32) { c.regs().mc_esp = x }
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint32) { c.info.si_addr = uintptr(x) }
diff --git a/src/runtime/signal_freebsd_386.h b/src/runtime/signal_freebsd_386.h
deleted file mode 100644
index a24f1ee96..000000000
--- a/src/runtime/signal_freebsd_386.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext)
-
-#define SIG_EAX(info, ctxt) (SIG_REGS(ctxt).mc_eax)
-#define SIG_EBX(info, ctxt) (SIG_REGS(ctxt).mc_ebx)
-#define SIG_ECX(info, ctxt) (SIG_REGS(ctxt).mc_ecx)
-#define SIG_EDX(info, ctxt) (SIG_REGS(ctxt).mc_edx)
-#define SIG_EDI(info, ctxt) (SIG_REGS(ctxt).mc_edi)
-#define SIG_ESI(info, ctxt) (SIG_REGS(ctxt).mc_esi)
-#define SIG_EBP(info, ctxt) (SIG_REGS(ctxt).mc_ebp)
-#define SIG_ESP(info, ctxt) (SIG_REGS(ctxt).mc_esp)
-#define SIG_EIP(info, ctxt) (SIG_REGS(ctxt).mc_eip)
-#define SIG_EFLAGS(info, ctxt) (SIG_REGS(ctxt).mc_eflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).mc_cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).mc_fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).mc_gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) ((uintptr)(info)->si_addr)
diff --git a/src/runtime/signal_freebsd_amd64.go b/src/runtime/signal_freebsd_amd64.go
new file mode 100644
index 000000000..d10c883d4
--- /dev/null
+++ b/src/runtime/signal_freebsd_amd64.go
@@ -0,0 +1,44 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *mcontext {
+ return (*mcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
+}
+func (c *sigctxt) rax() uint64 { return c.regs().mc_rax }
+func (c *sigctxt) rbx() uint64 { return c.regs().mc_rbx }
+func (c *sigctxt) rcx() uint64 { return c.regs().mc_rcx }
+func (c *sigctxt) rdx() uint64 { return c.regs().mc_rdx }
+func (c *sigctxt) rdi() uint64 { return c.regs().mc_rdi }
+func (c *sigctxt) rsi() uint64 { return c.regs().mc_rsi }
+func (c *sigctxt) rbp() uint64 { return c.regs().mc_rbp }
+func (c *sigctxt) rsp() uint64 { return c.regs().mc_rsp }
+func (c *sigctxt) r8() uint64 { return c.regs().mc_r8 }
+func (c *sigctxt) r9() uint64 { return c.regs().mc_r9 }
+func (c *sigctxt) r10() uint64 { return c.regs().mc_r10 }
+func (c *sigctxt) r11() uint64 { return c.regs().mc_r11 }
+func (c *sigctxt) r12() uint64 { return c.regs().mc_r12 }
+func (c *sigctxt) r13() uint64 { return c.regs().mc_r13 }
+func (c *sigctxt) r14() uint64 { return c.regs().mc_r14 }
+func (c *sigctxt) r15() uint64 { return c.regs().mc_r15 }
+func (c *sigctxt) rip() uint64 { return c.regs().mc_rip }
+func (c *sigctxt) rflags() uint64 { return c.regs().mc_rflags }
+func (c *sigctxt) cs() uint64 { return uint64(c.regs().mc_cs) }
+func (c *sigctxt) fs() uint64 { return uint64(c.regs().mc_fs) }
+func (c *sigctxt) gs() uint64 { return uint64(c.regs().mc_gs) }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return uint64(c.info.si_addr) }
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().mc_rip = x }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().mc_rsp = x }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) { c.info.si_addr = x }
diff --git a/src/runtime/signal_freebsd_amd64.h b/src/runtime/signal_freebsd_amd64.h
deleted file mode 100644
index 7d35b7f85..000000000
--- a/src/runtime/signal_freebsd_amd64.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext)
-
-#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).mc_rax)
-#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).mc_rbx)
-#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).mc_rcx)
-#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).mc_rdx)
-#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).mc_rdi)
-#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).mc_rsi)
-#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).mc_rbp)
-#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).mc_rsp)
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).mc_r8)
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).mc_r9)
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).mc_r10)
-#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).mc_r11)
-#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).mc_r12)
-#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).mc_r13)
-#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).mc_r14)
-#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).mc_r15)
-#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).mc_rip)
-#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).mc_rflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).mc_cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).mc_fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).mc_gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) ((uintptr)(info)->si_addr)
diff --git a/src/runtime/signal_freebsd_arm.go b/src/runtime/signal_freebsd_arm.go
new file mode 100644
index 000000000..12de23d58
--- /dev/null
+++ b/src/runtime/signal_freebsd_arm.go
@@ -0,0 +1,48 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *mcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+func (c *sigctxt) r0() uint32 { return c.regs().__gregs[0] }
+func (c *sigctxt) r1() uint32 { return c.regs().__gregs[1] }
+func (c *sigctxt) r2() uint32 { return c.regs().__gregs[2] }
+func (c *sigctxt) r3() uint32 { return c.regs().__gregs[3] }
+func (c *sigctxt) r4() uint32 { return c.regs().__gregs[4] }
+func (c *sigctxt) r5() uint32 { return c.regs().__gregs[5] }
+func (c *sigctxt) r6() uint32 { return c.regs().__gregs[6] }
+func (c *sigctxt) r7() uint32 { return c.regs().__gregs[7] }
+func (c *sigctxt) r8() uint32 { return c.regs().__gregs[8] }
+func (c *sigctxt) r9() uint32 { return c.regs().__gregs[9] }
+func (c *sigctxt) r10() uint32 { return c.regs().__gregs[10] }
+func (c *sigctxt) fp() uint32 { return c.regs().__gregs[11] }
+func (c *sigctxt) ip() uint32 { return c.regs().__gregs[12] }
+func (c *sigctxt) sp() uint32 { return c.regs().__gregs[13] }
+func (c *sigctxt) lr() uint32 { return c.regs().__gregs[14] }
+func (c *sigctxt) pc() uint32 { return c.regs().__gregs[15] }
+func (c *sigctxt) cpsr() uint32 { return c.regs().__gregs[16] }
+func (c *sigctxt) fault() uint32 { return uint32(c.info.si_addr) }
+func (c *sigctxt) trap() uint32 { return 0 }
+func (c *sigctxt) error() uint32 { return 0 }
+func (c *sigctxt) oldmask() uint32 { return 0 }
+
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint32 { return uint32(c.info.si_addr) }
+
+func (c *sigctxt) set_pc(x uint32) { c.regs().__gregs[15] = x }
+func (c *sigctxt) set_sp(x uint32) { c.regs().__gregs[13] = x }
+func (c *sigctxt) set_lr(x uint32) { c.regs().__gregs[14] = x }
+func (c *sigctxt) set_r10(x uint32) { c.regs().__gregs[10] = x }
+
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint32) {
+ c.info.si_addr = uintptr(x)
+}
diff --git a/src/runtime/signal_freebsd_arm.h b/src/runtime/signal_freebsd_arm.h
deleted file mode 100644
index 87a45aa27..000000000
--- a/src/runtime/signal_freebsd_arm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext)
-
-#define SIG_R0(info, ctxt) (SIG_REGS(ctxt).__gregs[0])
-#define SIG_R1(info, ctxt) (SIG_REGS(ctxt).__gregs[1])
-#define SIG_R2(info, ctxt) (SIG_REGS(ctxt).__gregs[2])
-#define SIG_R3(info, ctxt) (SIG_REGS(ctxt).__gregs[3])
-#define SIG_R4(info, ctxt) (SIG_REGS(ctxt).__gregs[4])
-#define SIG_R5(info, ctxt) (SIG_REGS(ctxt).__gregs[5])
-#define SIG_R6(info, ctxt) (SIG_REGS(ctxt).__gregs[6])
-#define SIG_R7(info, ctxt) (SIG_REGS(ctxt).__gregs[7])
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).__gregs[8])
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).__gregs[9])
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).__gregs[10])
-#define SIG_FP(info, ctxt) (SIG_REGS(ctxt).__gregs[11])
-#define SIG_IP(info, ctxt) (SIG_REGS(ctxt).__gregs[12])
-#define SIG_SP(info, ctxt) (SIG_REGS(ctxt).__gregs[13])
-#define SIG_LR(info, ctxt) (SIG_REGS(ctxt).__gregs[14])
-#define SIG_PC(info, ctxt) (SIG_REGS(ctxt).__gregs[15])
-#define SIG_CPSR(info, ctxt) (SIG_REGS(ctxt).__gregs[16])
-#define SIG_FAULT(info, ctxt) ((uintptr)(info)->si_addr)
-#define SIG_TRAP(info, ctxt) (0)
-#define SIG_ERROR(info, ctxt) (0)
-#define SIG_OLDMASK(info, ctxt) (0)
-#define SIG_CODE0(info, ctxt) ((uintptr)(info)->si_code)
diff --git a/src/runtime/signal_linux.go b/src/runtime/signal_linux.go
new file mode 100644
index 000000000..1c3d6872b
--- /dev/null
+++ b/src/runtime/signal_linux.go
@@ -0,0 +1,78 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigPanic, "SIGBUS: bus error"},
+ /* 8 */ {_SigPanic, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 11 */ {_SigPanic, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigThrow, "SIGSTKFLT: stack fault"},
+ /* 17 */ {_SigNotify, "SIGCHLD: child status has changed"},
+ /* 18 */ {0, "SIGCONT: continue"},
+ /* 19 */ {0, "SIGSTOP: stop, unblockable"},
+ /* 20 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 21 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 22 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 23 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 24 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 25 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 26 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 27 */ {_SigNotify, "SIGPROF: profiling alarm clock"},
+ /* 28 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 29 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 30 */ {_SigNotify, "SIGPWR: power failure restart"},
+ /* 31 */ {_SigNotify, "SIGSYS: bad system call"},
+ /* 32 */ {0, "signal 32"}, /* SIGCANCEL; see issue 6997 */
+ /* 33 */ {0, "signal 33"}, /* SIGSETXID; see issue 3871 */
+ /* 34 */ {_SigNotify, "signal 34"},
+ /* 35 */ {_SigNotify, "signal 35"},
+ /* 36 */ {_SigNotify, "signal 36"},
+ /* 37 */ {_SigNotify, "signal 37"},
+ /* 38 */ {_SigNotify, "signal 38"},
+ /* 39 */ {_SigNotify, "signal 39"},
+ /* 40 */ {_SigNotify, "signal 40"},
+ /* 41 */ {_SigNotify, "signal 41"},
+ /* 42 */ {_SigNotify, "signal 42"},
+ /* 43 */ {_SigNotify, "signal 43"},
+ /* 44 */ {_SigNotify, "signal 44"},
+ /* 45 */ {_SigNotify, "signal 45"},
+ /* 46 */ {_SigNotify, "signal 46"},
+ /* 47 */ {_SigNotify, "signal 47"},
+ /* 48 */ {_SigNotify, "signal 48"},
+ /* 49 */ {_SigNotify, "signal 49"},
+ /* 50 */ {_SigNotify, "signal 50"},
+ /* 51 */ {_SigNotify, "signal 51"},
+ /* 52 */ {_SigNotify, "signal 52"},
+ /* 53 */ {_SigNotify, "signal 53"},
+ /* 54 */ {_SigNotify, "signal 54"},
+ /* 55 */ {_SigNotify, "signal 55"},
+ /* 56 */ {_SigNotify, "signal 56"},
+ /* 57 */ {_SigNotify, "signal 57"},
+ /* 58 */ {_SigNotify, "signal 58"},
+ /* 59 */ {_SigNotify, "signal 59"},
+ /* 60 */ {_SigNotify, "signal 60"},
+ /* 61 */ {_SigNotify, "signal 61"},
+ /* 62 */ {_SigNotify, "signal 62"},
+ /* 63 */ {_SigNotify, "signal 63"},
+ /* 64 */ {_SigNotify, "signal 64"},
+}
diff --git a/src/runtime/signal_linux_386.go b/src/runtime/signal_linux_386.go
new file mode 100644
index 000000000..41eae80ea
--- /dev/null
+++ b/src/runtime/signal_linux_386.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+func (c *sigctxt) eax() uint32 { return c.regs().eax }
+func (c *sigctxt) ebx() uint32 { return c.regs().ebx }
+func (c *sigctxt) ecx() uint32 { return c.regs().ecx }
+func (c *sigctxt) edx() uint32 { return c.regs().edx }
+func (c *sigctxt) edi() uint32 { return c.regs().edi }
+func (c *sigctxt) esi() uint32 { return c.regs().esi }
+func (c *sigctxt) ebp() uint32 { return c.regs().ebp }
+func (c *sigctxt) esp() uint32 { return c.regs().esp }
+func (c *sigctxt) eip() uint32 { return c.regs().eip }
+func (c *sigctxt) eflags() uint32 { return c.regs().eflags }
+func (c *sigctxt) cs() uint32 { return uint32(c.regs().cs) }
+func (c *sigctxt) fs() uint32 { return uint32(c.regs().fs) }
+func (c *sigctxt) gs() uint32 { return uint32(c.regs().gs) }
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint32 { return uint32(*(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize))) }
+
+func (c *sigctxt) set_eip(x uint32) { c.regs().eip = x }
+func (c *sigctxt) set_esp(x uint32) { c.regs().esp = x }
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint32) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+}
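Unlike the BSD ports above, sigaddr here reads the fault address at a fixed byte offset inside siginfo rather than through a named field, using the runtime's add pointer helper. That helper is defined elsewhere in the runtime; assuming the usual shape, it is simply:

	// Presumed definition of the add helper used above:
	// advance an unsafe.Pointer by x bytes.
	func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
		return unsafe.Pointer(uintptr(p) + x)
	}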
diff --git a/src/runtime/signal_linux_386.h b/src/runtime/signal_linux_386.h
deleted file mode 100644
index f77f1c9d5..000000000
--- a/src/runtime/signal_linux_386.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (*((Sigcontext*)&((Ucontext*)(ctxt))->uc_mcontext))
-
-#define SIG_EAX(info, ctxt) (SIG_REGS(ctxt).eax)
-#define SIG_EBX(info, ctxt) (SIG_REGS(ctxt).ebx)
-#define SIG_ECX(info, ctxt) (SIG_REGS(ctxt).ecx)
-#define SIG_EDX(info, ctxt) (SIG_REGS(ctxt).edx)
-#define SIG_EDI(info, ctxt) (SIG_REGS(ctxt).edi)
-#define SIG_ESI(info, ctxt) (SIG_REGS(ctxt).esi)
-#define SIG_EBP(info, ctxt) (SIG_REGS(ctxt).ebp)
-#define SIG_ESP(info, ctxt) (SIG_REGS(ctxt).esp)
-#define SIG_EIP(info, ctxt) (SIG_REGS(ctxt).eip)
-#define SIG_EFLAGS(info, ctxt) (SIG_REGS(ctxt).eflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) (((uintptr*)(info))[2])
-
diff --git a/src/runtime/signal_linux_amd64.go b/src/runtime/signal_linux_amd64.go
new file mode 100644
index 000000000..d94b19102
--- /dev/null
+++ b/src/runtime/signal_linux_amd64.go
@@ -0,0 +1,46 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext {
+ return (*sigcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
+}
+func (c *sigctxt) rax() uint64 { return c.regs().rax }
+func (c *sigctxt) rbx() uint64 { return c.regs().rbx }
+func (c *sigctxt) rcx() uint64 { return c.regs().rcx }
+func (c *sigctxt) rdx() uint64 { return c.regs().rdx }
+func (c *sigctxt) rdi() uint64 { return c.regs().rdi }
+func (c *sigctxt) rsi() uint64 { return c.regs().rsi }
+func (c *sigctxt) rbp() uint64 { return c.regs().rbp }
+func (c *sigctxt) rsp() uint64 { return c.regs().rsp }
+func (c *sigctxt) r8() uint64 { return c.regs().r8 }
+func (c *sigctxt) r9() uint64 { return c.regs().r9 }
+func (c *sigctxt) r10() uint64 { return c.regs().r10 }
+func (c *sigctxt) r11() uint64 { return c.regs().r11 }
+func (c *sigctxt) r12() uint64 { return c.regs().r12 }
+func (c *sigctxt) r13() uint64 { return c.regs().r13 }
+func (c *sigctxt) r14() uint64 { return c.regs().r14 }
+func (c *sigctxt) r15() uint64 { return c.regs().r15 }
+func (c *sigctxt) rip() uint64 { return c.regs().rip }
+func (c *sigctxt) rflags() uint64 { return c.regs().eflags }
+func (c *sigctxt) cs() uint64 { return uint64(c.regs().cs) }
+func (c *sigctxt) fs() uint64 { return uint64(c.regs().fs) }
+func (c *sigctxt) gs() uint64 { return uint64(c.regs().gs) }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return uint64(*(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize))) }
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().rip = x }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+}
diff --git a/src/runtime/signal_linux_amd64.h b/src/runtime/signal_linux_amd64.h
deleted file mode 100644
index 5a9a3e5da..000000000
--- a/src/runtime/signal_linux_amd64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (*((Sigcontext*)&((Ucontext*)(ctxt))->uc_mcontext))
-
-#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).rax)
-#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).rbx)
-#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).rcx)
-#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).rdx)
-#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).rdi)
-#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).rsi)
-#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).rbp)
-#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).rsp)
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).r8)
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).r9)
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).r10)
-#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).r11)
-#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).r12)
-#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).r13)
-#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).r14)
-#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).r15)
-#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).rip)
-#define SIG_RFLAGS(info, ctxt) ((uint64)SIG_REGS(ctxt).eflags)
-
-#define SIG_CS(info, ctxt) ((uint64)SIG_REGS(ctxt).cs)
-#define SIG_FS(info, ctxt) ((uint64)SIG_REGS(ctxt).fs)
-#define SIG_GS(info, ctxt) ((uint64)SIG_REGS(ctxt).gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) (((uintptr*)(info))[2])
-
diff --git a/src/runtime/signal_linux_arm.go b/src/runtime/signal_linux_arm.go
new file mode 100644
index 000000000..4a5670e74
--- /dev/null
+++ b/src/runtime/signal_linux_arm.go
@@ -0,0 +1,48 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+func (c *sigctxt) r0() uint32 { return c.regs().r0 }
+func (c *sigctxt) r1() uint32 { return c.regs().r1 }
+func (c *sigctxt) r2() uint32 { return c.regs().r2 }
+func (c *sigctxt) r3() uint32 { return c.regs().r3 }
+func (c *sigctxt) r4() uint32 { return c.regs().r4 }
+func (c *sigctxt) r5() uint32 { return c.regs().r5 }
+func (c *sigctxt) r6() uint32 { return c.regs().r6 }
+func (c *sigctxt) r7() uint32 { return c.regs().r7 }
+func (c *sigctxt) r8() uint32 { return c.regs().r8 }
+func (c *sigctxt) r9() uint32 { return c.regs().r9 }
+func (c *sigctxt) r10() uint32 { return c.regs().r10 }
+func (c *sigctxt) fp() uint32 { return c.regs().fp }
+func (c *sigctxt) ip() uint32 { return c.regs().ip }
+func (c *sigctxt) sp() uint32 { return c.regs().sp }
+func (c *sigctxt) lr() uint32 { return c.regs().lr }
+func (c *sigctxt) pc() uint32 { return c.regs().pc }
+func (c *sigctxt) cpsr() uint32 { return c.regs().cpsr }
+func (c *sigctxt) fault() uint32 { return c.regs().fault_address }
+func (c *sigctxt) trap() uint32 { return c.regs().trap_no }
+func (c *sigctxt) error() uint32 { return c.regs().error_code }
+func (c *sigctxt) oldmask() uint32 { return c.regs().oldmask }
+
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint32 { return uint32(*(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize))) }
+
+func (c *sigctxt) set_pc(x uint32) { c.regs().pc = x }
+func (c *sigctxt) set_sp(x uint32) { c.regs().sp = x }
+func (c *sigctxt) set_lr(x uint32) { c.regs().lr = x }
+func (c *sigctxt) set_r10(x uint32) { c.regs().r10 = x }
+
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint32) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*ptrSize)) = uintptr(x)
+}
diff --git a/src/runtime/signal_linux_arm.h b/src/runtime/signal_linux_arm.h
deleted file mode 100644
index a674c0d57..000000000
--- a/src/runtime/signal_linux_arm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (*((Sigcontext*)&((Ucontext*)(ctxt))->uc_mcontext))
-
-#define SIG_R0(info, ctxt) (SIG_REGS(ctxt).arm_r0)
-#define SIG_R1(info, ctxt) (SIG_REGS(ctxt).arm_r1)
-#define SIG_R2(info, ctxt) (SIG_REGS(ctxt).arm_r2)
-#define SIG_R3(info, ctxt) (SIG_REGS(ctxt).arm_r3)
-#define SIG_R4(info, ctxt) (SIG_REGS(ctxt).arm_r4)
-#define SIG_R5(info, ctxt) (SIG_REGS(ctxt).arm_r5)
-#define SIG_R6(info, ctxt) (SIG_REGS(ctxt).arm_r6)
-#define SIG_R7(info, ctxt) (SIG_REGS(ctxt).arm_r7)
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).arm_r8)
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).arm_r9)
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).arm_r10)
-#define SIG_FP(info, ctxt) (SIG_REGS(ctxt).arm_fp)
-#define SIG_IP(info, ctxt) (SIG_REGS(ctxt).arm_ip)
-#define SIG_SP(info, ctxt) (SIG_REGS(ctxt).arm_sp)
-#define SIG_LR(info, ctxt) (SIG_REGS(ctxt).arm_lr)
-#define SIG_PC(info, ctxt) (SIG_REGS(ctxt).arm_pc)
-#define SIG_CPSR(info, ctxt) (SIG_REGS(ctxt).arm_cpsr)
-#define SIG_FAULT(info, ctxt) (SIG_REGS(ctxt).fault_address)
-#define SIG_TRAP(info, ctxt) (SIG_REGS(ctxt).trap_no)
-#define SIG_ERROR(info, ctxt) (SIG_REGS(ctxt).error_code)
-#define SIG_OLDMASK(info, ctxt) (SIG_REGS(ctxt).oldmask)
-#define SIG_CODE0(info, ctxt) ((uintptr)(info)->si_code)
diff --git a/src/runtime/signal_openbsd.go b/src/runtime/signal_openbsd.go
new file mode 100644
index 000000000..78afc59ef
--- /dev/null
+++ b/src/runtime/signal_openbsd.go
@@ -0,0 +1,46 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigThrow, "SIGEMT: emulate instruction executed"},
+ /* 8 */ {_SigPanic, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigPanic, "SIGBUS: bus error"},
+ /* 11 */ {_SigPanic, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigThrow, "SIGSYS: bad system call"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 17 */ {0, "SIGSTOP: stop"},
+ /* 18 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 19 */ {0, "SIGCONT: continue after stop"},
+ /* 20 */ {_SigNotify, "SIGCHLD: child status has changed"},
+ /* 21 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 22 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 23 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 24 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 25 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 26 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 27 */ {_SigNotify, "SIGPROF: profiling alarm clock"},
+ /* 28 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 29 */ {_SigNotify, "SIGINFO: status request from keyboard"},
+ /* 30 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 31 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+ /* 32 */ {_SigNotify, "SIGTHR: reserved"},
+}
diff --git a/src/runtime/signal_openbsd_386.go b/src/runtime/signal_openbsd_386.go
new file mode 100644
index 000000000..c582a4493
--- /dev/null
+++ b/src/runtime/signal_openbsd_386.go
@@ -0,0 +1,41 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext {
+ return (*sigcontext)(c.ctxt)
+}
+
+func (c *sigctxt) eax() uint32 { return c.regs().sc_eax }
+func (c *sigctxt) ebx() uint32 { return c.regs().sc_ebx }
+func (c *sigctxt) ecx() uint32 { return c.regs().sc_ecx }
+func (c *sigctxt) edx() uint32 { return c.regs().sc_edx }
+func (c *sigctxt) edi() uint32 { return c.regs().sc_edi }
+func (c *sigctxt) esi() uint32 { return c.regs().sc_esi }
+func (c *sigctxt) ebp() uint32 { return c.regs().sc_ebp }
+func (c *sigctxt) esp() uint32 { return c.regs().sc_esp }
+func (c *sigctxt) eip() uint32 { return c.regs().sc_eip }
+func (c *sigctxt) eflags() uint32 { return c.regs().sc_eflags }
+func (c *sigctxt) cs() uint32 { return c.regs().sc_cs }
+func (c *sigctxt) fs() uint32 { return c.regs().sc_fs }
+func (c *sigctxt) gs() uint32 { return c.regs().sc_gs }
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint32 {
+ return *(*uint32)(add(unsafe.Pointer(c.info), 12))
+}
+
+func (c *sigctxt) set_eip(x uint32) { c.regs().sc_eip = x }
+func (c *sigctxt) set_esp(x uint32) { c.regs().sc_esp = x }
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint32) {
+ *(*uint32)(add(unsafe.Pointer(c.info), 12)) = x
+}
diff --git a/src/runtime/signal_openbsd_386.h b/src/runtime/signal_openbsd_386.h
deleted file mode 100644
index 6742db8d4..000000000
--- a/src/runtime/signal_openbsd_386.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (*(Sigcontext*)(ctxt))
-
-#define SIG_EAX(info, ctxt) (SIG_REGS(ctxt).sc_eax)
-#define SIG_EBX(info, ctxt) (SIG_REGS(ctxt).sc_ebx)
-#define SIG_ECX(info, ctxt) (SIG_REGS(ctxt).sc_ecx)
-#define SIG_EDX(info, ctxt) (SIG_REGS(ctxt).sc_edx)
-#define SIG_EDI(info, ctxt) (SIG_REGS(ctxt).sc_edi)
-#define SIG_ESI(info, ctxt) (SIG_REGS(ctxt).sc_esi)
-#define SIG_EBP(info, ctxt) (SIG_REGS(ctxt).sc_ebp)
-#define SIG_ESP(info, ctxt) (SIG_REGS(ctxt).sc_esp)
-#define SIG_EIP(info, ctxt) (SIG_REGS(ctxt).sc_eip)
-#define SIG_EFLAGS(info, ctxt) (SIG_REGS(ctxt).sc_eflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).sc_cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).sc_fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).sc_gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) (*(uintptr*)((byte*)info + 12))
diff --git a/src/runtime/signal_openbsd_amd64.go b/src/runtime/signal_openbsd_amd64.go
new file mode 100644
index 000000000..4f0d19ddd
--- /dev/null
+++ b/src/runtime/signal_openbsd_amd64.go
@@ -0,0 +1,49 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext {
+ return (*sigcontext)(c.ctxt)
+}
+
+func (c *sigctxt) rax() uint64 { return c.regs().sc_rax }
+func (c *sigctxt) rbx() uint64 { return c.regs().sc_rbx }
+func (c *sigctxt) rcx() uint64 { return c.regs().sc_rcx }
+func (c *sigctxt) rdx() uint64 { return c.regs().sc_rdx }
+func (c *sigctxt) rdi() uint64 { return c.regs().sc_rdi }
+func (c *sigctxt) rsi() uint64 { return c.regs().sc_rsi }
+func (c *sigctxt) rbp() uint64 { return c.regs().sc_rbp }
+func (c *sigctxt) rsp() uint64 { return c.regs().sc_rsp }
+func (c *sigctxt) r8() uint64 { return c.regs().sc_r8 }
+func (c *sigctxt) r9() uint64 { return c.regs().sc_r9 }
+func (c *sigctxt) r10() uint64 { return c.regs().sc_r10 }
+func (c *sigctxt) r11() uint64 { return c.regs().sc_r11 }
+func (c *sigctxt) r12() uint64 { return c.regs().sc_r12 }
+func (c *sigctxt) r13() uint64 { return c.regs().sc_r13 }
+func (c *sigctxt) r14() uint64 { return c.regs().sc_r14 }
+func (c *sigctxt) r15() uint64 { return c.regs().sc_r15 }
+func (c *sigctxt) rip() uint64 { return c.regs().sc_rip }
+func (c *sigctxt) rflags() uint64 { return c.regs().sc_rflags }
+func (c *sigctxt) cs() uint64 { return c.regs().sc_cs }
+func (c *sigctxt) fs() uint64 { return c.regs().sc_fs }
+func (c *sigctxt) gs() uint64 { return c.regs().sc_gs }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 {
+ return *(*uint64)(add(unsafe.Pointer(c.info), 16))
+}
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().sc_rip = x }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().sc_rsp = x }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uint64)(add(unsafe.Pointer(c.info), 16)) = x
+}
diff --git a/src/runtime/signal_openbsd_amd64.h b/src/runtime/signal_openbsd_amd64.h
deleted file mode 100644
index b46a5dfa6..000000000
--- a/src/runtime/signal_openbsd_amd64.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (*(Sigcontext*)(ctxt))
-
-#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).sc_rax)
-#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).sc_rbx)
-#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).sc_rcx)
-#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).sc_rdx)
-#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).sc_rdi)
-#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).sc_rsi)
-#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).sc_rbp)
-#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).sc_rsp)
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).sc_r8)
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).sc_r9)
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).sc_r10)
-#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).sc_r11)
-#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).sc_r12)
-#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).sc_r13)
-#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).sc_r14)
-#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).sc_r15)
-#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).sc_rip)
-#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).sc_rflags)
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).sc_cs)
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).sc_fs)
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).sc_gs)
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) (*(uintptr*)((byte*)(info) + 16))
diff --git a/src/runtime/signal_solaris.go b/src/runtime/signal_solaris.go
new file mode 100644
index 000000000..2986c5aab
--- /dev/null
+++ b/src/runtime/signal_solaris.go
@@ -0,0 +1,88 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt (rubout)"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit (ASCII FS)"},
+ /* 4 */ {_SigThrow, "SIGILL: illegal instruction (not reset when caught)"},
+ /* 5 */ {_SigThrow, "SIGTRAP: trace trap (not reset when caught)"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: used by abort, replace SIGIOT in the future"},
+ /* 7 */ {_SigThrow, "SIGEMT: EMT instruction"},
+ /* 8 */ {_SigPanic, "SIGFPE: floating point exception"},
+ /* 9 */ {0, "SIGKILL: kill (cannot be caught or ignored)"},
+ /* 10 */ {_SigPanic, "SIGBUS: bus error"},
+ /* 11 */ {_SigPanic, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigThrow, "SIGSYS: bad argument to system call"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write on a pipe with no one to read it"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: software termination signal from kill"},
+ /* 16 */ {_SigNotify, "SIGUSR1: user defined signal 1"},
+ /* 17 */ {_SigNotify, "SIGUSR2: user defined signal 2"},
+ /* 18 */ {_SigNotify, "SIGCHLD: child status change alias (POSIX)"},
+ /* 19 */ {_SigNotify, "SIGPWR: power-fail restart"},
+ /* 20 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 21 */ {_SigNotify, "SIGURG: urgent socket condition"},
+ /* 22 */ {_SigNotify, "SIGPOLL: pollable event occured"},
+ /* 23 */ {_SigNotify + _SigDefault, "SIGSTOP: stop (cannot be caught or ignored)"},
+ /* 24 */ {0, "SIGTSTP: user stop requested from tty"},
+ /* 25 */ {0, "SIGCONT: stopped process has been continued"},
+ /* 26 */ {_SigNotify + _SigDefault, "SIGTTIN: background tty read attempted"},
+ /* 27 */ {_SigNotify + _SigDefault, "SIGTTOU: background tty write attempted"},
+ /* 28 */ {_SigNotify, "SIGVTALRM: virtual timer expired"},
+ /* 29 */ {_SigNotify, "SIGPROF: profiling timer expired"},
+ /* 30 */ {_SigNotify, "SIGXCPU: exceeded cpu limit"},
+ /* 31 */ {_SigNotify, "SIGXFSZ: exceeded file size limit"},
+ /* 32 */ {_SigNotify, "SIGWAITING: reserved signal no longer used by"},
+ /* 33 */ {_SigNotify, "SIGLWP: reserved signal no longer used by"},
+ /* 34 */ {_SigNotify, "SIGFREEZE: special signal used by CPR"},
+ /* 35 */ {_SigNotify, "SIGTHAW: special signal used by CPR"},
+ /* 36 */ {0, "SIGCANCEL: reserved signal for thread cancellation"},
+ /* 37 */ {_SigNotify, "SIGLOST: resource lost (eg, record-lock lost)"},
+ /* 38 */ {_SigNotify, "SIGXRES: resource control exceeded"},
+ /* 39 */ {_SigNotify, "SIGJVM1: reserved signal for Java Virtual Machine"},
+ /* 40 */ {_SigNotify, "SIGJVM2: reserved signal for Java Virtual Machine"},
+
+	/* TODO(aram): what should we do about these signals? _SigDefault or _SigNotify? is this set static? */
+ /* 41 */ {_SigNotify, "real time signal"},
+ /* 42 */ {_SigNotify, "real time signal"},
+ /* 43 */ {_SigNotify, "real time signal"},
+ /* 44 */ {_SigNotify, "real time signal"},
+ /* 45 */ {_SigNotify, "real time signal"},
+ /* 46 */ {_SigNotify, "real time signal"},
+ /* 47 */ {_SigNotify, "real time signal"},
+ /* 48 */ {_SigNotify, "real time signal"},
+ /* 49 */ {_SigNotify, "real time signal"},
+ /* 50 */ {_SigNotify, "real time signal"},
+ /* 51 */ {_SigNotify, "real time signal"},
+ /* 52 */ {_SigNotify, "real time signal"},
+ /* 53 */ {_SigNotify, "real time signal"},
+ /* 54 */ {_SigNotify, "real time signal"},
+ /* 55 */ {_SigNotify, "real time signal"},
+ /* 56 */ {_SigNotify, "real time signal"},
+ /* 57 */ {_SigNotify, "real time signal"},
+ /* 58 */ {_SigNotify, "real time signal"},
+ /* 59 */ {_SigNotify, "real time signal"},
+ /* 60 */ {_SigNotify, "real time signal"},
+ /* 61 */ {_SigNotify, "real time signal"},
+ /* 62 */ {_SigNotify, "real time signal"},
+ /* 63 */ {_SigNotify, "real time signal"},
+ /* 64 */ {_SigNotify, "real time signal"},
+ /* 65 */ {_SigNotify, "real time signal"},
+ /* 66 */ {_SigNotify, "real time signal"},
+ /* 67 */ {_SigNotify, "real time signal"},
+ /* 68 */ {_SigNotify, "real time signal"},
+ /* 69 */ {_SigNotify, "real time signal"},
+ /* 70 */ {_SigNotify, "real time signal"},
+ /* 71 */ {_SigNotify, "real time signal"},
+ /* 72 */ {_SigNotify, "real time signal"},
+}
diff --git a/src/runtime/signal_solaris_amd64.go b/src/runtime/signal_solaris_amd64.go
new file mode 100644
index 000000000..a577c8c19
--- /dev/null
+++ b/src/runtime/signal_solaris_amd64.go
@@ -0,0 +1,46 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *mcontext {
+ return (*mcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
+}
+func (c *sigctxt) rax() uint64 { return uint64(c.regs().gregs[_REG_RAX]) }
+func (c *sigctxt) rbx() uint64 { return uint64(c.regs().gregs[_REG_RBX]) }
+func (c *sigctxt) rcx() uint64 { return uint64(c.regs().gregs[_REG_RCX]) }
+func (c *sigctxt) rdx() uint64 { return uint64(c.regs().gregs[_REG_RDX]) }
+func (c *sigctxt) rdi() uint64 { return uint64(c.regs().gregs[_REG_RDI]) }
+func (c *sigctxt) rsi() uint64 { return uint64(c.regs().gregs[_REG_RSI]) }
+func (c *sigctxt) rbp() uint64 { return uint64(c.regs().gregs[_REG_RBP]) }
+func (c *sigctxt) rsp() uint64 { return uint64(c.regs().gregs[_REG_RSP]) }
+func (c *sigctxt) r8() uint64 { return uint64(c.regs().gregs[_REG_R8]) }
+func (c *sigctxt) r9() uint64 { return uint64(c.regs().gregs[_REG_R9]) }
+func (c *sigctxt) r10() uint64 { return uint64(c.regs().gregs[_REG_R10]) }
+func (c *sigctxt) r11() uint64 { return uint64(c.regs().gregs[_REG_R11]) }
+func (c *sigctxt) r12() uint64 { return uint64(c.regs().gregs[_REG_R12]) }
+func (c *sigctxt) r13() uint64 { return uint64(c.regs().gregs[_REG_R13]) }
+func (c *sigctxt) r14() uint64 { return uint64(c.regs().gregs[_REG_R14]) }
+func (c *sigctxt) r15() uint64 { return uint64(c.regs().gregs[_REG_R15]) }
+func (c *sigctxt) rip() uint64 { return uint64(c.regs().gregs[_REG_RIP]) }
+func (c *sigctxt) rflags() uint64 { return uint64(c.regs().gregs[_REG_RFLAGS]) }
+func (c *sigctxt) cs() uint64 { return uint64(c.regs().gregs[_REG_CS]) }
+func (c *sigctxt) fs() uint64 { return uint64(c.regs().gregs[_REG_FS]) }
+func (c *sigctxt) gs() uint64 { return uint64(c.regs().gregs[_REG_GS]) }
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return *(*uint64)(unsafe.Pointer(&c.info.__data[0])) }
+
+func (c *sigctxt) set_rip(x uint64) { c.regs().gregs[_REG_RIP] = int64(x) }
+func (c *sigctxt) set_rsp(x uint64) { c.regs().gregs[_REG_RSP] = int64(x) }
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uintptr)(unsafe.Pointer(&c.info.__data[0])) = uintptr(x)
+}
diff --git a/src/runtime/signal_solaris_amd64.h b/src/runtime/signal_solaris_amd64.h
deleted file mode 100644
index c2e0a1549..000000000
--- a/src/runtime/signal_solaris_amd64.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#define SIG_REGS(ctxt) (((Ucontext*)(ctxt))->uc_mcontext)
-
-#define SIG_RAX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RAX])
-#define SIG_RBX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RBX])
-#define SIG_RCX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RCX])
-#define SIG_RDX(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RDX])
-#define SIG_RDI(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RDI])
-#define SIG_RSI(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RSI])
-#define SIG_RBP(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RBP])
-#define SIG_RSP(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RSP])
-#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R8])
-#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R9])
-#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R10])
-#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R11])
-#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R12])
-#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R13])
-#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R14])
-#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).gregs[REG_R15])
-#define SIG_RIP(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RIP])
-#define SIG_RFLAGS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_RFLAGS])
-
-#define SIG_CS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_CS])
-#define SIG_FS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_FS])
-#define SIG_GS(info, ctxt) (SIG_REGS(ctxt).gregs[REG_GS])
-
-#define SIG_CODE0(info, ctxt) ((info)->si_code)
-#define SIG_CODE1(info, ctxt) (*(uintptr*)&(info)->__data[0])
diff --git a/src/runtime/signal_unix.c b/src/runtime/signal_unix.c
deleted file mode 100644
index 0e33ece49..000000000
--- a/src/runtime/signal_unix.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
-
-#include "runtime.h"
-#include "defs_GOOS_GOARCH.h"
-#include "os_GOOS.h"
-#include "signal_unix.h"
-
-extern SigTab runtime·sigtab[];
-
-void
-runtime·initsig(void)
-{
- int32 i;
- SigTab *t;
-
- // First call: basic setup.
- for(i = 0; i<NSIG; i++) {
- t = &runtime·sigtab[i];
- if((t->flags == 0) || (t->flags & SigDefault))
- continue;
-
- // For some signals, we respect an inherited SIG_IGN handler
- // rather than insist on installing our own default handler.
- // Even these signals can be fetched using the os/signal package.
- switch(i) {
- case SIGHUP:
- case SIGINT:
- if(runtime·getsig(i) == SIG_IGN) {
- t->flags = SigNotify | SigIgnored;
- continue;
- }
- }
-
- t->flags |= SigHandling;
- runtime·setsig(i, runtime·sighandler, true);
- }
-}
-
-void
-runtime·sigenable(uint32 sig)
-{
- SigTab *t;
-
- if(sig >= NSIG)
- return;
-
- t = &runtime·sigtab[sig];
- if((t->flags & SigNotify) && !(t->flags & SigHandling)) {
- t->flags |= SigHandling;
- if(runtime·getsig(sig) == SIG_IGN)
- t->flags |= SigIgnored;
- runtime·setsig(sig, runtime·sighandler, true);
- }
-}
-
-void
-runtime·sigdisable(uint32 sig)
-{
- SigTab *t;
-
- if(sig >= NSIG)
- return;
-
- t = &runtime·sigtab[sig];
- if((t->flags & SigNotify) && (t->flags & SigHandling)) {
- t->flags &= ~SigHandling;
- if(t->flags & SigIgnored)
- runtime·setsig(sig, SIG_IGN, true);
- else
- runtime·setsig(sig, SIG_DFL, true);
- }
-}
-
-void
-runtime·resetcpuprofiler(int32 hz)
-{
- Itimerval it;
-
- runtime·memclr((byte*)&it, sizeof it);
- if(hz == 0) {
- runtime·setitimer(ITIMER_PROF, &it, nil);
- } else {
- it.it_interval.tv_sec = 0;
- it.it_interval.tv_usec = 1000000 / hz;
- it.it_value = it.it_interval;
- runtime·setitimer(ITIMER_PROF, &it, nil);
- }
- g->m->profilehz = hz;
-}
-
-void
-runtime·sigpipe(void)
-{
- runtime·setsig(SIGPIPE, SIG_DFL, false);
- runtime·raise(SIGPIPE);
-}
-
-void
-runtime·crash(void)
-{
-#ifdef GOOS_darwin
- // OS X core dumps are linear dumps of the mapped memory,
- // from the first virtual byte to the last, with zeros in the gaps.
- // Because of the way we arrange the address space on 64-bit systems,
- // this means the OS X core file will be >128 GB and even on a zippy
- // workstation can take OS X well over an hour to write (uninterruptible).
- // Save users from making that mistake.
- if(sizeof(void*) == 8)
- return;
-#endif
-
- runtime·unblocksignals();
- runtime·setsig(SIGABRT, SIG_DFL, false);
- runtime·raise(SIGABRT);
-}
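
The deleted signal_unix.c above walks runtime·sigtab, skipping entries whose flags are zero or marked SigDefault, and keeps an inherited SIG_IGN for SIGHUP/SIGINT before installing handlers. A sketch of that bookkeeping over a toy table (the type and flag names here are illustrative, not the runtime's):

package main

import "fmt"

// Illustrative flag bits and table; the runtime's real SigTab uses the
// SigNotify/SigKill/... values defined in its signal tables, not these.
const (
	sigNotify = 1 << iota
	sigKill
	sigDefault
	sigHandling
	sigIgnored
)

type sigTabEntry struct {
	flags int32
	name  string
}

var sigtab = []sigTabEntry{
	{0, "SIGNONE"},
	{sigNotify | sigKill, "SIGHUP"},
	{sigNotify | sigKill, "SIGINT"},
	{sigNotify | sigDefault, "SIGTSTP"},
}

// initsig mirrors the deleted C loop: skip unset or SigDefault entries,
// keep an inherited SIG_IGN for HUP/INT, and mark the rest as handled.
func initsig(inheritedIgnore func(sig int) bool) {
	for i := range sigtab {
		t := &sigtab[i]
		if t.flags == 0 || t.flags&sigDefault != 0 {
			continue
		}
		if (t.name == "SIGHUP" || t.name == "SIGINT") && inheritedIgnore(i) {
			t.flags = sigNotify | sigIgnored
			continue
		}
		t.flags |= sigHandling
		fmt.Println("installing handler for", t.name)
	}
}

func main() {
	initsig(func(sig int) bool { return sig == 1 }) // pretend SIGHUP inherited SIG_IGN
	for _, t := range sigtab {
		fmt.Printf("%-8s flags=%#x\n", t.name, t.flags)
	}
}
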
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index ba77b6e7b..c457083dc 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -6,8 +6,6 @@
package runtime
-func sigpipe()
-
func os_sigpipe() {
- onM(sigpipe)
+ systemstack(sigpipe)
}
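
The only change here is the calling convention: onM took a function value and, when arguments were needed, passed them through m.scalararg/ptrarg (as in the sigqueue.go hunk later in this diff), while systemstack runs an ordinary Go function or closure on the system stack. A minimal sketch of the new shape, with a local stand-in for systemstack (the real one is runtime-internal and actually switches stacks):

package main

import "fmt"

// Illustrative stand-in for the runtime's systemstack: the real one switches
// to the g0 (system) stack before running fn; here we just call it.
func systemstack(fn func()) { fn() }

func sigpipe() { fmt.Println("delivering SIGPIPE to the default handler") }

// osSigpipe passes the function directly instead of going through onM's
// function-pointer/scalararg protocol used by the old C-based runtime.
func osSigpipe() {
	systemstack(sigpipe)
}

func main() { osSigpipe() }
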
diff --git a/src/runtime/signals_darwin.h b/src/runtime/signals_darwin.h
deleted file mode 100644
index 8761e1bd9..000000000
--- a/src/runtime/signals_darwin.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-#pragma dataflag NOPTR
-SigTab runtime·sigtab[] = {
- /* 0 */ 0, "SIGNONE: no trap",
- /* 1 */ N+K, "SIGHUP: terminal line hangup",
- /* 2 */ N+K, "SIGINT: interrupt",
- /* 3 */ N+T, "SIGQUIT: quit",
- /* 4 */ T, "SIGILL: illegal instruction",
- /* 5 */ T, "SIGTRAP: trace trap",
- /* 6 */ N+T, "SIGABRT: abort",
- /* 7 */ T, "SIGEMT: emulate instruction executed",
- /* 8 */ P, "SIGFPE: floating-point exception",
- /* 9 */ 0, "SIGKILL: kill",
- /* 10 */ P, "SIGBUS: bus error",
- /* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ T, "SIGSYS: bad system call",
- /* 13 */ N, "SIGPIPE: write to broken pipe",
- /* 14 */ N, "SIGALRM: alarm clock",
- /* 15 */ N+K, "SIGTERM: termination",
- /* 16 */ N, "SIGURG: urgent condition on socket",
- /* 17 */ 0, "SIGSTOP: stop",
- /* 18 */ N+D, "SIGTSTP: keyboard stop",
- /* 19 */ 0, "SIGCONT: continue after stop",
- /* 20 */ N, "SIGCHLD: child status has changed",
- /* 21 */ N+D, "SIGTTIN: background read from tty",
- /* 22 */ N+D, "SIGTTOU: background write to tty",
- /* 23 */ N, "SIGIO: i/o now possible",
- /* 24 */ N, "SIGXCPU: cpu limit exceeded",
- /* 25 */ N, "SIGXFSZ: file size limit exceeded",
- /* 26 */ N, "SIGVTALRM: virtual alarm clock",
- /* 27 */ N, "SIGPROF: profiling alarm clock",
- /* 28 */ N, "SIGWINCH: window size change",
- /* 29 */ N, "SIGINFO: status request from keyboard",
- /* 30 */ N, "SIGUSR1: user-defined signal 1",
- /* 31 */ N, "SIGUSR2: user-defined signal 2",
-};
-
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
diff --git a/src/runtime/signals_dragonfly.h b/src/runtime/signals_dragonfly.h
deleted file mode 100644
index 07343a766..000000000
--- a/src/runtime/signals_dragonfly.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-#pragma dataflag NOPTR
-SigTab runtime·sigtab[] = {
- /* 0 */ 0, "SIGNONE: no trap",
- /* 1 */ N+K, "SIGHUP: terminal line hangup",
- /* 2 */ N+K, "SIGINT: interrupt",
- /* 3 */ N+T, "SIGQUIT: quit",
- /* 4 */ T, "SIGILL: illegal instruction",
- /* 5 */ T, "SIGTRAP: trace trap",
- /* 6 */ N+T, "SIGABRT: abort",
- /* 7 */ T, "SIGEMT: emulate instruction executed",
- /* 8 */ P, "SIGFPE: floating-point exception",
- /* 9 */ 0, "SIGKILL: kill",
- /* 10 */ P, "SIGBUS: bus error",
- /* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ T, "SIGSYS: bad system call",
- /* 13 */ N, "SIGPIPE: write to broken pipe",
- /* 14 */ N, "SIGALRM: alarm clock",
- /* 15 */ N+K, "SIGTERM: termination",
- /* 16 */ N, "SIGURG: urgent condition on socket",
- /* 17 */ 0, "SIGSTOP: stop",
- /* 18 */ N+D, "SIGTSTP: keyboard stop",
- /* 19 */ 0, "SIGCONT: continue after stop",
- /* 20 */ N, "SIGCHLD: child status has changed",
- /* 21 */ N+D, "SIGTTIN: background read from tty",
- /* 22 */ N+D, "SIGTTOU: background write to tty",
- /* 23 */ N, "SIGIO: i/o now possible",
- /* 24 */ N, "SIGXCPU: cpu limit exceeded",
- /* 25 */ N, "SIGXFSZ: file size limit exceeded",
- /* 26 */ N, "SIGVTALRM: virtual alarm clock",
- /* 27 */ N, "SIGPROF: profiling alarm clock",
- /* 28 */ N, "SIGWINCH: window size change",
- /* 29 */ N, "SIGINFO: status request from keyboard",
- /* 30 */ N, "SIGUSR1: user-defined signal 1",
- /* 31 */ N, "SIGUSR2: user-defined signal 2",
- /* 32 */ N, "SIGTHR: reserved",
-};
-
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
diff --git a/src/runtime/signals_freebsd.h b/src/runtime/signals_freebsd.h
deleted file mode 100644
index 39e0a947e..000000000
--- a/src/runtime/signals_freebsd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-#pragma dataflag NOPTR
-SigTab runtime·sigtab[] = {
- /* 0 */ 0, "SIGNONE: no trap",
- /* 1 */ N+K, "SIGHUP: terminal line hangup",
- /* 2 */ N+K, "SIGINT: interrupt",
- /* 3 */ N+T, "SIGQUIT: quit",
- /* 4 */ T, "SIGILL: illegal instruction",
- /* 5 */ T, "SIGTRAP: trace trap",
- /* 6 */ N+T, "SIGABRT: abort",
- /* 7 */ T, "SIGEMT: emulate instruction executed",
- /* 8 */ P, "SIGFPE: floating-point exception",
- /* 9 */ 0, "SIGKILL: kill",
- /* 10 */ P, "SIGBUS: bus error",
- /* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ N, "SIGSYS: bad system call",
- /* 13 */ N, "SIGPIPE: write to broken pipe",
- /* 14 */ N, "SIGALRM: alarm clock",
- /* 15 */ N+K, "SIGTERM: termination",
- /* 16 */ N, "SIGURG: urgent condition on socket",
- /* 17 */ 0, "SIGSTOP: stop",
- /* 18 */ N+D, "SIGTSTP: keyboard stop",
- /* 19 */ 0, "SIGCONT: continue after stop",
- /* 20 */ N, "SIGCHLD: child status has changed",
- /* 21 */ N+D, "SIGTTIN: background read from tty",
- /* 22 */ N+D, "SIGTTOU: background write to tty",
- /* 23 */ N, "SIGIO: i/o now possible",
- /* 24 */ N, "SIGXCPU: cpu limit exceeded",
- /* 25 */ N, "SIGXFSZ: file size limit exceeded",
- /* 26 */ N, "SIGVTALRM: virtual alarm clock",
- /* 27 */ N, "SIGPROF: profiling alarm clock",
- /* 28 */ N, "SIGWINCH: window size change",
- /* 29 */ N, "SIGINFO: status request from keyboard",
- /* 30 */ N, "SIGUSR1: user-defined signal 1",
- /* 31 */ N, "SIGUSR2: user-defined signal 2",
- /* 32 */ N, "SIGTHR: reserved",
-};
-
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
diff --git a/src/runtime/signals_linux.h b/src/runtime/signals_linux.h
deleted file mode 100644
index 374107609..000000000
--- a/src/runtime/signals_linux.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-#pragma dataflag NOPTR
-SigTab runtime·sigtab[] = {
- /* 0 */ 0, "SIGNONE: no trap",
- /* 1 */ N+K, "SIGHUP: terminal line hangup",
- /* 2 */ N+K, "SIGINT: interrupt",
- /* 3 */ N+T, "SIGQUIT: quit",
- /* 4 */ T, "SIGILL: illegal instruction",
- /* 5 */ T, "SIGTRAP: trace trap",
- /* 6 */ N+T, "SIGABRT: abort",
- /* 7 */ P, "SIGBUS: bus error",
- /* 8 */ P, "SIGFPE: floating-point exception",
- /* 9 */ 0, "SIGKILL: kill",
- /* 10 */ N, "SIGUSR1: user-defined signal 1",
- /* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ N, "SIGUSR2: user-defined signal 2",
- /* 13 */ N, "SIGPIPE: write to broken pipe",
- /* 14 */ N, "SIGALRM: alarm clock",
- /* 15 */ N+K, "SIGTERM: termination",
- /* 16 */ T, "SIGSTKFLT: stack fault",
- /* 17 */ N, "SIGCHLD: child status has changed",
- /* 18 */ 0, "SIGCONT: continue",
- /* 19 */ 0, "SIGSTOP: stop, unblockable",
- /* 20 */ N+D, "SIGTSTP: keyboard stop",
- /* 21 */ N+D, "SIGTTIN: background read from tty",
- /* 22 */ N+D, "SIGTTOU: background write to tty",
- /* 23 */ N, "SIGURG: urgent condition on socket",
- /* 24 */ N, "SIGXCPU: cpu limit exceeded",
- /* 25 */ N, "SIGXFSZ: file size limit exceeded",
- /* 26 */ N, "SIGVTALRM: virtual alarm clock",
- /* 27 */ N, "SIGPROF: profiling alarm clock",
- /* 28 */ N, "SIGWINCH: window size change",
- /* 29 */ N, "SIGIO: i/o now possible",
- /* 30 */ N, "SIGPWR: power failure restart",
- /* 31 */ N, "SIGSYS: bad system call",
- /* 32 */ 0, "signal 32", /* SIGCANCEL; see issue 6997 */
- /* 33 */ 0, "signal 33", /* SIGSETXID; see issue 3871 */
- /* 34 */ N, "signal 34",
- /* 35 */ N, "signal 35",
- /* 36 */ N, "signal 36",
- /* 37 */ N, "signal 37",
- /* 38 */ N, "signal 38",
- /* 39 */ N, "signal 39",
- /* 40 */ N, "signal 40",
- /* 41 */ N, "signal 41",
- /* 42 */ N, "signal 42",
- /* 43 */ N, "signal 43",
- /* 44 */ N, "signal 44",
- /* 45 */ N, "signal 45",
- /* 46 */ N, "signal 46",
- /* 47 */ N, "signal 47",
- /* 48 */ N, "signal 48",
- /* 49 */ N, "signal 49",
- /* 50 */ N, "signal 50",
- /* 51 */ N, "signal 51",
- /* 52 */ N, "signal 52",
- /* 53 */ N, "signal 53",
- /* 54 */ N, "signal 54",
- /* 55 */ N, "signal 55",
- /* 56 */ N, "signal 56",
- /* 57 */ N, "signal 57",
- /* 58 */ N, "signal 58",
- /* 59 */ N, "signal 59",
- /* 60 */ N, "signal 60",
- /* 61 */ N, "signal 61",
- /* 62 */ N, "signal 62",
- /* 63 */ N, "signal 63",
- /* 64 */ N, "signal 64",
-};
-
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
diff --git a/src/runtime/signals_openbsd.h b/src/runtime/signals_openbsd.h
deleted file mode 100644
index 950a2fe62..000000000
--- a/src/runtime/signals_openbsd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-#pragma dataflag NOPTR
-SigTab runtime·sigtab[] = {
- /* 0 */ 0, "SIGNONE: no trap",
- /* 1 */ N+K, "SIGHUP: terminal line hangup",
- /* 2 */ N+K, "SIGINT: interrupt",
- /* 3 */ N+T, "SIGQUIT: quit",
- /* 4 */ T, "SIGILL: illegal instruction",
- /* 5 */ T, "SIGTRAP: trace trap",
- /* 6 */ N+T, "SIGABRT: abort",
- /* 7 */ T, "SIGEMT: emulate instruction executed",
- /* 8 */ P, "SIGFPE: floating-point exception",
- /* 9 */ 0, "SIGKILL: kill",
- /* 10 */ P, "SIGBUS: bus error",
- /* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ T, "SIGSYS: bad system call",
- /* 13 */ N, "SIGPIPE: write to broken pipe",
- /* 14 */ N, "SIGALRM: alarm clock",
- /* 15 */ N+K, "SIGTERM: termination",
- /* 16 */ N, "SIGURG: urgent condition on socket",
- /* 17 */ 0, "SIGSTOP: stop",
- /* 18 */ N+D, "SIGTSTP: keyboard stop",
- /* 19 */ 0, "SIGCONT: continue after stop",
- /* 20 */ N, "SIGCHLD: child status has changed",
- /* 21 */ N+D, "SIGTTIN: background read from tty",
- /* 22 */ N+D, "SIGTTOU: background write to tty",
- /* 23 */ N, "SIGIO: i/o now possible",
- /* 24 */ N, "SIGXCPU: cpu limit exceeded",
- /* 25 */ N, "SIGXFSZ: file size limit exceeded",
- /* 26 */ N, "SIGVTALRM: virtual alarm clock",
- /* 27 */ N, "SIGPROF: profiling alarm clock",
- /* 28 */ N, "SIGWINCH: window size change",
- /* 29 */ N, "SIGINFO: status request from keyboard",
- /* 30 */ N, "SIGUSR1: user-defined signal 1",
- /* 31 */ N, "SIGUSR2: user-defined signal 2",
- /* 32 */ N, "SIGTHR: reserved",
-};
-
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
diff --git a/src/runtime/signals_solaris.h b/src/runtime/signals_solaris.h
deleted file mode 100644
index 1f0a65ea6..000000000
--- a/src/runtime/signals_solaris.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-#define N SigNotify
-#define K SigKill
-#define T SigThrow
-#define P SigPanic
-#define D SigDefault
-
-#pragma dataflag NOPTR
-SigTab runtime·sigtab[] = {
- /* 0 */ 0, "SIGNONE: no trap",
- /* 1 */ N+K, "SIGHUP: hangup",
- /* 2 */ N+K, "SIGINT: interrupt (rubout)",
- /* 3 */ N+T, "SIGQUIT: quit (ASCII FS)",
- /* 4 */ T, "SIGILL: illegal instruction (not reset when caught)",
- /* 5 */ T, "SIGTRAP: trace trap (not reset when caught)",
- /* 6 */ N+T, "SIGABRT: used by abort, replace SIGIOT in the future",
- /* 7 */ T, "SIGEMT: EMT instruction",
- /* 8 */ P, "SIGFPE: floating point exception",
- /* 9 */ 0, "SIGKILL: kill (cannot be caught or ignored)",
- /* 10 */ P, "SIGBUS: bus error",
- /* 11 */ P, "SIGSEGV: segmentation violation",
- /* 12 */ T, "SIGSYS: bad argument to system call",
- /* 13 */ N, "SIGPIPE: write on a pipe with no one to read it",
- /* 14 */ N, "SIGALRM: alarm clock",
- /* 15 */ N+K, "SIGTERM: software termination signal from kill",
- /* 16 */ N, "SIGUSR1: user defined signal 1",
- /* 17 */ N, "SIGUSR2: user defined signal 2",
- /* 18 */ N, "SIGCLD: child status change",
- /* 18 */ N, "SIGCHLD: child status change alias (POSIX)",
- /* 19 */ N, "SIGPWR: power-fail restart",
- /* 20 */ N, "SIGWINCH: window size change",
- /* 21 */ N, "SIGURG: urgent socket condition",
- /* 22 */ N, "SIGPOLL: pollable event occured",
- /* 23 */ N+D, "SIGSTOP: stop (cannot be caught or ignored)",
- /* 24 */ 0, "SIGTSTP: user stop requested from tty",
- /* 25 */ 0, "SIGCONT: stopped process has been continued",
- /* 26 */ N+D, "SIGTTIN: background tty read attempted",
- /* 27 */ N+D, "SIGTTOU: background tty write attempted",
- /* 28 */ N, "SIGVTALRM: virtual timer expired",
- /* 29 */ N, "SIGPROF: profiling timer expired",
- /* 30 */ N, "SIGXCPU: exceeded cpu limit",
- /* 31 */ N, "SIGXFSZ: exceeded file size limit",
- /* 32 */ N, "SIGWAITING: reserved signal no longer used by",
- /* 33 */ N, "SIGLWP: reserved signal no longer used by",
- /* 34 */ N, "SIGFREEZE: special signal used by CPR",
- /* 35 */ N, "SIGTHAW: special signal used by CPR",
- /* 36 */ 0, "SIGCANCEL: reserved signal for thread cancellation",
- /* 37 */ N, "SIGLOST: resource lost (eg, record-lock lost)",
- /* 38 */ N, "SIGXRES: resource control exceeded",
- /* 39 */ N, "SIGJVM1: reserved signal for Java Virtual Machine",
- /* 40 */ N, "SIGJVM2: reserved signal for Java Virtual Machine",
-
- /* TODO(aram): what should be do about these signals? D or N? is this set static? */
- /* 41 */ N, "real time signal",
- /* 42 */ N, "real time signal",
- /* 43 */ N, "real time signal",
- /* 44 */ N, "real time signal",
- /* 45 */ N, "real time signal",
- /* 46 */ N, "real time signal",
- /* 47 */ N, "real time signal",
- /* 48 */ N, "real time signal",
- /* 49 */ N, "real time signal",
- /* 50 */ N, "real time signal",
- /* 51 */ N, "real time signal",
- /* 52 */ N, "real time signal",
- /* 53 */ N, "real time signal",
- /* 54 */ N, "real time signal",
- /* 55 */ N, "real time signal",
- /* 56 */ N, "real time signal",
- /* 57 */ N, "real time signal",
- /* 58 */ N, "real time signal",
- /* 59 */ N, "real time signal",
- /* 60 */ N, "real time signal",
- /* 61 */ N, "real time signal",
- /* 62 */ N, "real time signal",
- /* 63 */ N, "real time signal",
- /* 64 */ N, "real time signal",
- /* 65 */ N, "real time signal",
- /* 66 */ N, "real time signal",
- /* 67 */ N, "real time signal",
- /* 68 */ N, "real time signal",
- /* 69 */ N, "real time signal",
- /* 70 */ N, "real time signal",
- /* 71 */ N, "real time signal",
- /* 72 */ N, "real time signal",
-};
-
-#undef N
-#undef K
-#undef T
-#undef P
-#undef D
diff --git a/src/runtime/sigpanic_unix.go b/src/runtime/sigpanic_unix.go
index 68079859b..7bf2c1540 100644
--- a/src/runtime/sigpanic_unix.go
+++ b/src/runtime/sigpanic_unix.go
@@ -6,8 +6,6 @@
package runtime
-func signame(int32) *byte
-
func sigpanic() {
g := getg()
if !canpanic(g) {
@@ -36,5 +34,10 @@ func sigpanic() {
}
panicfloat()
}
- panic(errorString(gostringnocopy(signame(g.sig))))
+
+ if g.sig >= uint32(len(sigtable)) {
+ // can't happen: we looked up g.sig in sigtable to decide to call sigpanic
+ gothrow("unexpected signal value")
+ }
+ panic(errorString(sigtable[g.sig].name))
}
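
sigpanic now derives the panic message from the Go sigtable instead of the C signame helper, after a bounds check on g.sig. A toy version of that bounds-checked lookup (panicForSignal and the error wrapping are illustrative, not the runtime's errorString path):

package main

import "fmt"

// Toy version of the new lookup in sigpanic: validate the signal number
// against the table before indexing, then panic with the entry's name.
var sigtable = []struct{ name string }{
	{"SIGNONE: no trap"},
	{"SIGHUP: terminal line hangup"},
	{"SIGINT: interrupt"},
}

func panicForSignal(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		panic("unexpected signal value")
	}
	panic(fmt.Errorf("runtime error: %s", sigtable[sig].name))
}

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	panicForSignal(2)
}
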
diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go
index 2d9c24d2d..82ead228f 100644
--- a/src/runtime/sigqueue.go
+++ b/src/runtime/sigqueue.go
@@ -45,7 +45,7 @@ const (
// Called from sighandler to send a signal back out of the signal handling thread.
// Reports whether the signal was sent. If not, the caller typically crashes the program.
-func sigsend(s int32) bool {
+func sigsend(s uint32) bool {
bit := uint32(1) << uint(s&31)
if !sig.inuse || s < 0 || int(s) >= 32*len(sig.wanted) || sig.wanted[s/32]&bit == 0 {
return false
@@ -139,7 +139,7 @@ func signal_enable(s uint32) {
return
}
sig.wanted[s/32] |= 1 << (s & 31)
- sigenable_go(s)
+ sigenable(s)
}
// Must only be called from a single goroutine at a time.
@@ -148,7 +148,7 @@ func signal_disable(s uint32) {
return
}
sig.wanted[s/32] &^= 1 << (s & 31)
- sigdisable_go(s)
+ sigdisable(s)
}
// This runs on a foreign stack, without an m or a g. No stack split.
@@ -156,18 +156,3 @@ func signal_disable(s uint32) {
func badsignal(sig uintptr) {
cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
}
-
-func sigenable_m()
-func sigdisable_m()
-
-func sigenable_go(s uint32) {
- g := getg()
- g.m.scalararg[0] = uintptr(s)
- onM(sigenable_m)
-}
-
-func sigdisable_go(s uint32) {
- g := getg()
- g.m.scalararg[0] = uintptr(s)
- onM(sigdisable_m)
-}
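
sigsend, signal_enable and signal_disable all address the wanted-signal bitmap the same way: signal s lives in word s/32 at bit s&31, set with |=, cleared with &^=. A standalone sketch of that bitmap (sigMask is an illustrative type, not the runtime's sig structure):

package main

import "fmt"

// Minimal sketch of the wanted-signal bitmap used by sigsend/signal_enable:
// signal s lives in word s/32 at bit s&31.
type sigMask struct {
	wanted [3]uint32 // covers signals 0..95, like sig.wanted in the runtime
}

func (m *sigMask) enable(s uint32)  { m.wanted[s/32] |= 1 << (s & 31) }
func (m *sigMask) disable(s uint32) { m.wanted[s/32] &^= 1 << (s & 31) }

// want reports whether signal s is currently wanted, mirroring the test
// sigsend performs before queuing a signal for os/signal.
func (m *sigMask) want(s uint32) bool {
	if int(s) >= 32*len(m.wanted) {
		return false
	}
	return m.wanted[s/32]&(1<<(s&31)) != 0
}

func main() {
	var m sigMask
	m.enable(34)
	fmt.Println(m.want(34), m.want(35)) // true false
	m.disable(34)
	fmt.Println(m.want(34)) // false
}
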
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 171087d7f..93cea5cc3 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -22,11 +22,11 @@ func makeslice(t *slicetype, len64 int64, cap64 int64) sliceStruct {
// but since the cap is only being supplied implicitly, saying len is clearer.
// See issue 4085.
len := int(len64)
- if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > maxmem/uintptr(t.elem.size) {
+ if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > _MaxMem/uintptr(t.elem.size) {
panic(errorString("makeslice: len out of range"))
}
cap := int(cap64)
- if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) {
+ if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > _MaxMem/uintptr(t.elem.size) {
panic(errorString("makeslice: cap out of range"))
}
p := newarray(t.elem, uintptr(cap))
@@ -42,7 +42,7 @@ func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct {
cap64 := int64(old.cap) + n
cap := int(cap64)
- if int64(cap) != cap64 || cap < old.cap || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) {
+ if int64(cap) != cap64 || cap < old.cap || t.elem.size > 0 && uintptr(cap) > _MaxMem/uintptr(t.elem.size) {
panic(errorString("growslice: cap out of range"))
}
@@ -72,7 +72,7 @@ func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct {
}
}
- if uintptr(newcap) >= maxmem/uintptr(et.size) {
+ if uintptr(newcap) >= _MaxMem/uintptr(et.size) {
panic(errorString("growslice: cap out of range"))
}
lenmem := uintptr(old.len) * uintptr(et.size)
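
makeslice and growslice guard against two failure modes before allocating: the 64-bit length and capacity must fit in an int, and len*elem.size must not exceed an upper memory bound (_MaxMem). A sketch of the same validation with an illustrative maxMem constant (the real _MaxMem is platform-dependent):

package main

import (
	"errors"
	"fmt"
	"unsafe"
)

// Illustrative cap on total allocation size; the runtime's _MaxMem is
// platform-dependent and chosen to match the address-space layout.
const maxMem = 1 << 30

// checkMakeSlice mirrors the validation in makeslice: the 64-bit length must
// fit in an int, be non-negative, and len*elemSize must not overflow maxMem.
func checkMakeSlice(elemSize uintptr, len64, cap64 int64) error {
	l := int(len64)
	if len64 < 0 || int64(l) != len64 || elemSize > 0 && uintptr(l) > maxMem/elemSize {
		return errors.New("makeslice: len out of range")
	}
	c := int(cap64)
	if c < l || int64(c) != cap64 || elemSize > 0 && uintptr(c) > maxMem/elemSize {
		return errors.New("makeslice: cap out of range")
	}
	return nil
}

func main() {
	elem := unsafe.Sizeof(int64(0))
	fmt.Println(checkMakeSlice(elem, 10, 20))       // <nil>
	fmt.Println(checkMakeSlice(elem, 1<<40, 1<<40)) // len out of range
	fmt.Println(checkMakeSlice(elem, 10, 5))        // cap out of range
}
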
diff --git a/src/runtime/softfloat64.go b/src/runtime/softfloat64.go
index 4fcf8f269..c157a14e2 100644
--- a/src/runtime/softfloat64.go
+++ b/src/runtime/softfloat64.go
@@ -340,7 +340,7 @@ func f32to64(f uint32) uint64 {
return fpack64(fs64, uint64(fm)<<d, fe, 0)
}
-func fcmp64(f, g uint64) (cmp int, isnan bool) {
+func fcmp64(f, g uint64) (cmp int32, isnan bool) {
fs, fm, _, fi, fn := funpack64(f)
gs, gm, _, gi, gn := funpack64(g)
@@ -486,13 +486,13 @@ again2:
// callable from C
-func fadd64c(f, g uint64, ret *uint64) { *ret = fadd64(f, g) }
-func fsub64c(f, g uint64, ret *uint64) { *ret = fsub64(f, g) }
-func fmul64c(f, g uint64, ret *uint64) { *ret = fmul64(f, g) }
-func fdiv64c(f, g uint64, ret *uint64) { *ret = fdiv64(f, g) }
-func fneg64c(f uint64, ret *uint64) { *ret = fneg64(f) }
-func f32to64c(f uint32, ret *uint64) { *ret = f32to64(f) }
-func f64to32c(f uint64, ret *uint32) { *ret = f64to32(f) }
-func fcmp64c(f, g uint64, ret *int, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
-func fintto64c(val int64, ret *uint64) { *ret = fintto64(val) }
-func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
+func fadd64c(f, g uint64, ret *uint64) { *ret = fadd64(f, g) }
+func fsub64c(f, g uint64, ret *uint64) { *ret = fsub64(f, g) }
+func fmul64c(f, g uint64, ret *uint64) { *ret = fmul64(f, g) }
+func fdiv64c(f, g uint64, ret *uint64) { *ret = fdiv64(f, g) }
+func fneg64c(f uint64, ret *uint64) { *ret = fneg64(f) }
+func f32to64c(f uint32, ret *uint64) { *ret = f32to64(f) }
+func f64to32c(f uint64, ret *uint32) { *ret = f64to32(f) }
+func fcmp64c(f, g uint64, ret *int32, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
+func fintto64c(val int64, ret *uint64) { *ret = fintto64(val) }
+func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
diff --git a/src/runtime/softfloat64_test.go b/src/runtime/softfloat64_test.go
index df63010fb..e10887283 100644
--- a/src/runtime/softfloat64_test.go
+++ b/src/runtime/softfloat64_test.go
@@ -182,7 +182,7 @@ func hwcmp(f, g float64) (cmp int, isnan bool) {
func testcmp(t *testing.T, f, g float64) {
hcmp, hisnan := hwcmp(f, g)
scmp, sisnan := Fcmp64(math.Float64bits(f), math.Float64bits(g))
- if hcmp != scmp || hisnan != sisnan {
+ if int32(hcmp) != scmp || hisnan != sisnan {
err(t, "cmp(%g, %g) = sw %v, %v, hw %v, %v\n", f, g, scmp, sisnan, hcmp, hisnan)
}
}
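
fcmp64 now returns an int32 comparison result, and the test coerces the hardware comparison to int32 to match. A small example of the contract both sides implement, cmp in {-1, 0, +1} plus an isnan flag for unordered operands (cmp64 here is an illustrative hardware analogue, not the soft-float routine):

package main

import (
	"fmt"
	"math"
)

// Hardware analogue of fcmp64's contract: cmp is -1/0/+1 and isnan reports
// an unordered comparison, matching the int32 result type the patch adopts.
func cmp64(f, g float64) (cmp int32, isnan bool) {
	switch {
	case f < g:
		return -1, false
	case f > g:
		return +1, false
	case f == g:
		return 0, false
	}
	return 0, true // at least one NaN: unordered
}

func main() {
	fmt.Println(cmp64(1, 2))          // -1 false
	fmt.Println(cmp64(2, 2))          // 0 false
	fmt.Println(cmp64(math.NaN(), 2)) // 0 true
}
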
diff --git a/src/runtime/softfloat_arm.c b/src/runtime/softfloat_arm.c
deleted file mode 100644
index 3f3f33a19..000000000
--- a/src/runtime/softfloat_arm.c
+++ /dev/null
@@ -1,687 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Software floating point interpretaton of ARM 7500 FP instructions.
-// The interpretation is not bit compatible with the 7500.
-// It uses true little-endian doubles, while the 7500 used mixed-endian.
-
-#include "runtime.h"
-#include "textflag.h"
-
-#define CPSR 14
-#define FLAGS_N (1U << 31)
-#define FLAGS_Z (1U << 30)
-#define FLAGS_C (1U << 29)
-#define FLAGS_V (1U << 28)
-
-void runtime·abort(void);
-void runtime·sqrtC(uint64, uint64*);
-
-static uint32 trace = 0;
-
-static void
-fabort(void)
-{
- if (1) {
- runtime·printf("Unsupported floating point instruction\n");
- runtime·abort();
- }
-}
-
-static void
-putf(uint32 reg, uint32 val)
-{
- g->m->freglo[reg] = val;
-}
-
-static void
-putd(uint32 reg, uint64 val)
-{
- g->m->freglo[reg] = (uint32)val;
- g->m->freghi[reg] = (uint32)(val>>32);
-}
-
-static uint64
-getd(uint32 reg)
-{
- return (uint64)g->m->freglo[reg] | ((uint64)g->m->freghi[reg]<<32);
-}
-
-static void
-fprint(void)
-{
- uint32 i;
- for (i = 0; i < 16; i++) {
- runtime·printf("\tf%d:\t%X %X\n", i, g->m->freghi[i], g->m->freglo[i]);
- }
-}
-
-static uint32
-d2f(uint64 d)
-{
- uint32 x;
-
- runtime·f64to32c(d, &x);
- return x;
-}
-
-static uint64
-f2d(uint32 f)
-{
- uint64 x;
-
- runtime·f32to64c(f, &x);
- return x;
-}
-
-static uint32
-fstatus(bool nan, int32 cmp)
-{
- if(nan)
- return FLAGS_C | FLAGS_V;
- if(cmp == 0)
- return FLAGS_Z | FLAGS_C;
- if(cmp < 0)
- return FLAGS_N;
- return FLAGS_C;
-}
-
-// conditions array record the required CPSR cond field for the
-// first 5 pairs of conditional execution opcodes
-// higher 4 bits are must set, lower 4 bits are must clear
-#pragma dataflag NOPTR
-static const uint8 conditions[10/2] = {
- [0/2] = (FLAGS_Z >> 24) | 0, // 0: EQ (Z set), 1: NE (Z clear)
- [2/2] = (FLAGS_C >> 24) | 0, // 2: CS/HS (C set), 3: CC/LO (C clear)
- [4/2] = (FLAGS_N >> 24) | 0, // 4: MI (N set), 5: PL (N clear)
- [6/2] = (FLAGS_V >> 24) | 0, // 6: VS (V set), 7: VC (V clear)
- [8/2] = (FLAGS_C >> 24) |
- (FLAGS_Z >> 28), // 8: HI (C set and Z clear), 9: LS (C clear and Z set)
-};
-
-#define FAULT (0x80000000U) // impossible PC offset
-
-// returns number of words that the fp instruction
-// is occupying, 0 if next instruction isn't float.
-static uint32
-stepflt(uint32 *pc, uint32 *regs)
-{
- uint32 i, opc, regd, regm, regn, cpsr;
- int32 delta;
- uint32 *addr;
- uint64 uval;
- int64 sval;
- bool nan, ok;
- int32 cmp;
- M *m;
-
- // m is locked in vlop_arm.s, so g->m cannot change during this function call,
- // so caching it in a local variable is safe.
- m = g->m;
- i = *pc;
-
- if(trace)
- runtime·printf("stepflt %p %x (cpsr %x)\n", pc, i, regs[CPSR] >> 28);
-
- opc = i >> 28;
- if(opc == 14) // common case first
- goto execute;
- cpsr = regs[CPSR] >> 28;
- switch(opc) {
- case 0: case 1: case 2: case 3: case 4:
- case 5: case 6: case 7: case 8: case 9:
- if(((cpsr & (conditions[opc/2] >> 4)) == (conditions[opc/2] >> 4)) &&
- ((cpsr & (conditions[opc/2] & 0xf)) == 0)) {
- if(opc & 1) return 1;
- } else {
- if(!(opc & 1)) return 1;
- }
- break;
- case 10: // GE (N == V)
- case 11: // LT (N != V)
- if((cpsr & (FLAGS_N >> 28)) == (cpsr & (FLAGS_V >> 28))) {
- if(opc & 1) return 1;
- } else {
- if(!(opc & 1)) return 1;
- }
- break;
- case 12: // GT (N == V and Z == 0)
- case 13: // LE (N != V or Z == 1)
- if((cpsr & (FLAGS_N >> 28)) == (cpsr & (FLAGS_V >> 28)) &&
- (cpsr & (FLAGS_Z >> 28)) == 0) {
- if(opc & 1) return 1;
- } else {
- if(!(opc & 1)) return 1;
- }
- break;
- case 14: // AL
- break;
- case 15: // shouldn't happen
- return 0;
- }
- if(trace)
- runtime·printf("conditional %x (cpsr %x) pass\n", opc, cpsr);
- i = (0xeU << 28) | (i & 0xfffffff);
-
-execute:
- // special cases
- if((i&0xfffff000) == 0xe59fb000) {
- // load r11 from pc-relative address.
- // might be part of a floating point move
- // (or might not, but no harm in simulating
- // one instruction too many).
- addr = (uint32*)((uint8*)pc + (i&0xfff) + 8);
- regs[11] = addr[0];
-
- if(trace)
- runtime·printf("*** cpu R[%d] = *(%p) %x\n",
- 11, addr, regs[11]);
- return 1;
- }
- if(i == 0xe08bb00d) {
- // add sp to r11.
- // might be part of a large stack offset address
- // (or might not, but again no harm done).
- regs[11] += regs[13];
-
- if(trace)
- runtime·printf("*** cpu R[%d] += R[%d] %x\n",
- 11, 13, regs[11]);
- return 1;
- }
- if(i == 0xeef1fa10) {
- regs[CPSR] = (regs[CPSR]&0x0fffffff) | m->fflag;
-
- if(trace)
- runtime·printf("*** fpsr R[CPSR] = F[CPSR] %x\n", regs[CPSR]);
- return 1;
- }
- if((i&0xff000000) == 0xea000000) {
- // unconditional branch
- // can happen in the middle of floating point
- // if the linker decides it is time to lay down
- // a sequence of instruction stream constants.
- delta = i&0xffffff;
- delta = (delta<<8) >> 8; // sign extend
-
- if(trace)
- runtime·printf("*** cpu PC += %x\n", (delta+2)*4);
- return delta+2;
- }
-
- goto stage1;
-
-stage1: // load/store regn is cpureg, regm is 8bit offset
- regd = i>>12 & 0xf;
- regn = i>>16 & 0xf;
- regm = (i & 0xff) << 2; // PLUS or MINUS ??
-
- switch(i & 0xfff00f00) {
- default:
- goto stage2;
-
- case 0xed900a00: // single load
- addr = (uint32*)(regs[regn] + regm);
- if((uintptr)addr < 4096) {
- if(trace)
- runtime·printf("*** load @%p => fault\n", addr);
- return FAULT;
- }
- m->freglo[regd] = addr[0];
-
- if(trace)
- runtime·printf("*** load F[%d] = %x\n",
- regd, m->freglo[regd]);
- break;
-
- case 0xed900b00: // double load
- addr = (uint32*)(regs[regn] + regm);
- if((uintptr)addr < 4096) {
- if(trace)
- runtime·printf("*** double load @%p => fault\n", addr);
- return FAULT;
- }
- m->freglo[regd] = addr[0];
- m->freghi[regd] = addr[1];
-
- if(trace)
- runtime·printf("*** load D[%d] = %x-%x\n",
- regd, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xed800a00: // single store
- addr = (uint32*)(regs[regn] + regm);
- if((uintptr)addr < 4096) {
- if(trace)
- runtime·printf("*** store @%p => fault\n", addr);
- return FAULT;
- }
- addr[0] = m->freglo[regd];
-
- if(trace)
- runtime·printf("*** *(%p) = %x\n",
- addr, addr[0]);
- break;
-
- case 0xed800b00: // double store
- addr = (uint32*)(regs[regn] + regm);
- if((uintptr)addr < 4096) {
- if(trace)
- runtime·printf("*** double store @%p => fault\n", addr);
- return FAULT;
- }
- addr[0] = m->freglo[regd];
- addr[1] = m->freghi[regd];
-
- if(trace)
- runtime·printf("*** *(%p) = %x-%x\n",
- addr, addr[1], addr[0]);
- break;
- }
- return 1;
-
-stage2: // regd, regm, regn are 4bit variables
- regm = i>>0 & 0xf;
- switch(i & 0xfff00ff0) {
- default:
- goto stage3;
-
- case 0xf3000110: // veor
- m->freglo[regd] = m->freglo[regm]^m->freglo[regn];
- m->freghi[regd] = m->freghi[regm]^m->freghi[regn];
-
- if(trace)
- runtime·printf("*** veor D[%d] = %x-%x\n",
- regd, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb00b00: // D[regd] = const(regn,regm)
- regn = (regn<<4) | regm;
- regm = 0x40000000UL;
- if(regn & 0x80)
- regm |= 0x80000000UL;
- if(regn & 0x40)
- regm ^= 0x7fc00000UL;
- regm |= (regn & 0x3f) << 16;
- m->freglo[regd] = 0;
- m->freghi[regd] = regm;
-
- if(trace)
- runtime·printf("*** immed D[%d] = %x-%x\n",
- regd, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb00a00: // F[regd] = const(regn,regm)
- regn = (regn<<4) | regm;
- regm = 0x40000000UL;
- if(regn & 0x80)
- regm |= 0x80000000UL;
- if(regn & 0x40)
- regm ^= 0x7e000000UL;
- regm |= (regn & 0x3f) << 19;
- m->freglo[regd] = regm;
-
- if(trace)
- runtime·printf("*** immed D[%d] = %x\n",
- regd, m->freglo[regd]);
- break;
-
- case 0xee300b00: // D[regd] = D[regn]+D[regm]
- runtime·fadd64c(getd(regn), getd(regm), &uval);
- putd(regd, uval);
-
- if(trace)
- runtime·printf("*** add D[%d] = D[%d]+D[%d] %x-%x\n",
- regd, regn, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xee300a00: // F[regd] = F[regn]+F[regm]
- runtime·fadd64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
- m->freglo[regd] = d2f(uval);
-
- if(trace)
- runtime·printf("*** add F[%d] = F[%d]+F[%d] %x\n",
- regd, regn, regm, m->freglo[regd]);
- break;
-
- case 0xee300b40: // D[regd] = D[regn]-D[regm]
- runtime·fsub64c(getd(regn), getd(regm), &uval);
- putd(regd, uval);
-
- if(trace)
- runtime·printf("*** sub D[%d] = D[%d]-D[%d] %x-%x\n",
- regd, regn, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xee300a40: // F[regd] = F[regn]-F[regm]
- runtime·fsub64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
- m->freglo[regd] = d2f(uval);
-
- if(trace)
- runtime·printf("*** sub F[%d] = F[%d]-F[%d] %x\n",
- regd, regn, regm, m->freglo[regd]);
- break;
-
- case 0xee200b00: // D[regd] = D[regn]*D[regm]
- runtime·fmul64c(getd(regn), getd(regm), &uval);
- putd(regd, uval);
-
- if(trace)
- runtime·printf("*** mul D[%d] = D[%d]*D[%d] %x-%x\n",
- regd, regn, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xee200a00: // F[regd] = F[regn]*F[regm]
- runtime·fmul64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
- m->freglo[regd] = d2f(uval);
-
- if(trace)
- runtime·printf("*** mul F[%d] = F[%d]*F[%d] %x\n",
- regd, regn, regm, m->freglo[regd]);
- break;
-
- case 0xee800b00: // D[regd] = D[regn]/D[regm]
- runtime·fdiv64c(getd(regn), getd(regm), &uval);
- putd(regd, uval);
-
- if(trace)
- runtime·printf("*** div D[%d] = D[%d]/D[%d] %x-%x\n",
- regd, regn, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xee800a00: // F[regd] = F[regn]/F[regm]
- runtime·fdiv64c(f2d(m->freglo[regn]), f2d(m->freglo[regm]), &uval);
- m->freglo[regd] = d2f(uval);
-
- if(trace)
- runtime·printf("*** div F[%d] = F[%d]/F[%d] %x\n",
- regd, regn, regm, m->freglo[regd]);
- break;
-
- case 0xee000b10: // S[regn] = R[regd] (MOVW) (regm ignored)
- m->freglo[regn] = regs[regd];
-
- if(trace)
- runtime·printf("*** cpy S[%d] = R[%d] %x\n",
- regn, regd, m->freglo[regn]);
- break;
-
- case 0xee100b10: // R[regd] = S[regn] (MOVW) (regm ignored)
- regs[regd] = m->freglo[regn];
-
- if(trace)
- runtime·printf("*** cpy R[%d] = S[%d] %x\n",
- regd, regn, regs[regd]);
- break;
- }
- return 1;
-
-stage3: // regd, regm are 4bit variables
- switch(i & 0xffff0ff0) {
- default:
- goto done;
-
- case 0xeeb00a40: // F[regd] = F[regm] (MOVF)
- m->freglo[regd] = m->freglo[regm];
-
- if(trace)
- runtime·printf("*** F[%d] = F[%d] %x\n",
- regd, regm, m->freglo[regd]);
- break;
-
- case 0xeeb00b40: // D[regd] = D[regm] (MOVD)
- m->freglo[regd] = m->freglo[regm];
- m->freghi[regd] = m->freghi[regm];
-
- if(trace)
- runtime·printf("*** D[%d] = D[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb10bc0: // D[regd] = sqrt D[regm]
- runtime·sqrtC(getd(regm), &uval);
- putd(regd, uval);
-
- if(trace)
- runtime·printf("*** D[%d] = sqrt D[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb00bc0: // D[regd] = abs D[regm]
- m->freglo[regd] = m->freglo[regm];
- m->freghi[regd] = m->freghi[regm] & ((1<<31)-1);
-
- if(trace)
- runtime·printf("*** D[%d] = abs D[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb00ac0: // F[regd] = abs F[regm]
- m->freglo[regd] = m->freglo[regm] & ((1<<31)-1);
-
- if(trace)
- runtime·printf("*** F[%d] = abs F[%d] %x\n",
- regd, regm, m->freglo[regd]);
- break;
-
- case 0xeeb40bc0: // D[regd] :: D[regm] (CMPD)
- runtime·fcmp64c(getd(regd), getd(regm), &cmp, &nan);
- m->fflag = fstatus(nan, cmp);
-
- if(trace)
- runtime·printf("*** cmp D[%d]::D[%d] %x\n",
- regd, regm, m->fflag);
- break;
-
- case 0xeeb40ac0: // F[regd] :: F[regm] (CMPF)
- runtime·fcmp64c(f2d(m->freglo[regd]), f2d(m->freglo[regm]), &cmp, &nan);
- m->fflag = fstatus(nan, cmp);
-
- if(trace)
- runtime·printf("*** cmp F[%d]::F[%d] %x\n",
- regd, regm, m->fflag);
- break;
-
- case 0xeeb70ac0: // D[regd] = F[regm] (MOVFD)
- putd(regd, f2d(m->freglo[regm]));
-
- if(trace)
- runtime·printf("*** f2d D[%d]=F[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb70bc0: // F[regd] = D[regm] (MOVDF)
- m->freglo[regd] = d2f(getd(regm));
-
- if(trace)
- runtime·printf("*** d2f F[%d]=D[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeebd0ac0: // S[regd] = F[regm] (MOVFW)
- runtime·f64tointc(f2d(m->freglo[regm]), &sval, &ok);
- if(!ok || (int32)sval != sval)
- sval = 0;
- m->freglo[regd] = sval;
-
- if(trace)
- runtime·printf("*** fix S[%d]=F[%d] %x\n",
- regd, regm, m->freglo[regd]);
- break;
-
- case 0xeebc0ac0: // S[regd] = F[regm] (MOVFW.U)
- runtime·f64tointc(f2d(m->freglo[regm]), &sval, &ok);
- if(!ok || (uint32)sval != sval)
- sval = 0;
- m->freglo[regd] = sval;
-
- if(trace)
- runtime·printf("*** fix unsigned S[%d]=F[%d] %x\n",
- regd, regm, m->freglo[regd]);
- break;
-
- case 0xeebd0bc0: // S[regd] = D[regm] (MOVDW)
- runtime·f64tointc(getd(regm), &sval, &ok);
- if(!ok || (int32)sval != sval)
- sval = 0;
- m->freglo[regd] = sval;
-
- if(trace)
- runtime·printf("*** fix S[%d]=D[%d] %x\n",
- regd, regm, m->freglo[regd]);
- break;
-
- case 0xeebc0bc0: // S[regd] = D[regm] (MOVDW.U)
- runtime·f64tointc(getd(regm), &sval, &ok);
- if(!ok || (uint32)sval != sval)
- sval = 0;
- m->freglo[regd] = sval;
-
- if(trace)
- runtime·printf("*** fix unsigned S[%d]=D[%d] %x\n",
- regd, regm, m->freglo[regd]);
- break;
-
- case 0xeeb80ac0: // D[regd] = S[regm] (MOVWF)
- cmp = m->freglo[regm];
- if(cmp < 0) {
- runtime·fintto64c(-cmp, &uval);
- putf(regd, d2f(uval));
- m->freglo[regd] ^= 0x80000000;
- } else {
- runtime·fintto64c(cmp, &uval);
- putf(regd, d2f(uval));
- }
-
- if(trace)
- runtime·printf("*** float D[%d]=S[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb80a40: // D[regd] = S[regm] (MOVWF.U)
- runtime·fintto64c(m->freglo[regm], &uval);
- putf(regd, d2f(uval));
-
- if(trace)
- runtime·printf("*** float unsigned D[%d]=S[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb80bc0: // D[regd] = S[regm] (MOVWD)
- cmp = m->freglo[regm];
- if(cmp < 0) {
- runtime·fintto64c(-cmp, &uval);
- putd(regd, uval);
- m->freghi[regd] ^= 0x80000000;
- } else {
- runtime·fintto64c(cmp, &uval);
- putd(regd, uval);
- }
-
- if(trace)
- runtime·printf("*** float D[%d]=S[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
-
- case 0xeeb80b40: // D[regd] = S[regm] (MOVWD.U)
- runtime·fintto64c(m->freglo[regm], &uval);
- putd(regd, uval);
-
- if(trace)
- runtime·printf("*** float unsigned D[%d]=S[%d] %x-%x\n",
- regd, regm, m->freghi[regd], m->freglo[regd]);
- break;
- }
- return 1;
-
-done:
- if((i&0xff000000) == 0xee000000 ||
- (i&0xff000000) == 0xed000000) {
- runtime·printf("stepflt %p %x\n", pc, i);
- fabort();
- }
- return 0;
-}
-
-typedef struct Sfregs Sfregs;
-
-// NOTE: These are all recorded as pointers because they are possibly live registers,
-// and we don't know what they contain. Recording them as pointers should be
-// safer than not.
-struct Sfregs
-{
- uint32 *r0;
- uint32 *r1;
- uint32 *r2;
- uint32 *r3;
- uint32 *r4;
- uint32 *r5;
- uint32 *r6;
- uint32 *r7;
- uint32 *r8;
- uint32 *r9;
- uint32 *r10;
- uint32 *r11;
- uint32 *r12;
- uint32 *r13;
- uint32 cspr;
-};
-
-static void sfloat2(void);
-void _sfloatpanic(void);
-
-#pragma textflag NOSPLIT
-uint32*
-runtime·_sfloat2(uint32 *pc, Sfregs regs)
-{
- void (*fn)(void);
-
- g->m->ptrarg[0] = pc;
- g->m->ptrarg[1] = &regs;
- fn = sfloat2;
- runtime·onM(&fn);
- pc = g->m->ptrarg[0];
- g->m->ptrarg[0] = nil;
- return pc;
-}
-
-static void
-sfloat2(void)
-{
- uint32 *pc;
- G *curg;
- Sfregs *regs;
- int32 skip;
- bool first;
-
- pc = g->m->ptrarg[0];
- regs = g->m->ptrarg[1];
- g->m->ptrarg[0] = nil;
- g->m->ptrarg[1] = nil;
-
- first = true;
- while(skip = stepflt(pc, (uint32*)&regs->r0)) {
- first = false;
- if(skip == FAULT) {
- // Encountered bad address in store/load.
- // Record signal information and return to assembly
- // trampoline that fakes the call.
- enum { SIGSEGV = 11 };
- curg = g->m->curg;
- curg->sig = SIGSEGV;
- curg->sigcode0 = 0;
- curg->sigcode1 = 0;
- curg->sigpc = (uint32)pc;
- pc = (uint32*)_sfloatpanic;
- break;
- }
- pc += skip;
- }
- if(first) {
- runtime·printf("sfloat2 %p %x\n", pc, *pc);
- fabort(); // not ok to fail first instruction
- }
-
- g->m->ptrarg[0] = pc;
-}
diff --git a/src/runtime/softfloat_arm.go b/src/runtime/softfloat_arm.go
new file mode 100644
index 000000000..746b9ea21
--- /dev/null
+++ b/src/runtime/softfloat_arm.go
@@ -0,0 +1,644 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Software floating point interpretation of ARM 7500 FP instructions.
+// The interpretation is not bit compatible with the 7500.
+// It uses true little-endian doubles, while the 7500 used mixed-endian.
+
+package runtime
+
+import "unsafe"
+
+const (
+ _CPSR = 14
+ _FLAGS_N = 1 << 31
+ _FLAGS_Z = 1 << 30
+ _FLAGS_C = 1 << 29
+ _FLAGS_V = 1 << 28
+)
+
+var fptrace = 0
+
+func fabort() {
+ gothrow("unsupported floating point instruction")
+}
+
+func fputf(reg uint32, val uint32) {
+ _g_ := getg()
+ _g_.m.freglo[reg] = val
+}
+
+func fputd(reg uint32, val uint64) {
+ _g_ := getg()
+ _g_.m.freglo[reg] = uint32(val)
+ _g_.m.freghi[reg] = uint32(val >> 32)
+}
+
+func fgetd(reg uint32) uint64 {
+ _g_ := getg()
+ return uint64(_g_.m.freglo[reg]) | uint64(_g_.m.freghi[reg])<<32
+}
+
+func fprintregs() {
+ _g_ := getg()
+ for i := range _g_.m.freglo {
+ print("\tf", i, ":\t", hex(_g_.m.freghi[i]), " ", hex(_g_.m.freglo[i]), "\n")
+ }
+}
+
+func fstatus(nan bool, cmp int32) uint32 {
+ if nan {
+ return _FLAGS_C | _FLAGS_V
+ }
+ if cmp == 0 {
+ return _FLAGS_Z | _FLAGS_C
+ }
+ if cmp < 0 {
+ return _FLAGS_N
+ }
+ return _FLAGS_C
+}
+
+// The conditions array records the required CPSR cond field for the
+// first 5 pairs of conditional execution opcodes:
+// the higher 4 bits must be set, the lower 4 bits must be clear.
+var conditions = [10 / 2]uint32{
+ 0 / 2: _FLAGS_Z>>24 | 0, // 0: EQ (Z set), 1: NE (Z clear)
+ 2 / 2: _FLAGS_C>>24 | 0, // 2: CS/HS (C set), 3: CC/LO (C clear)
+ 4 / 2: _FLAGS_N>>24 | 0, // 4: MI (N set), 5: PL (N clear)
+ 6 / 2: _FLAGS_V>>24 | 0, // 6: VS (V set), 7: VC (V clear)
+ 8 / 2: _FLAGS_C>>24 |
+ _FLAGS_Z>>28,
+}
+
+const _FAULT = 0x80000000 // impossible PC offset
+
+// returns number of words that the fp instruction
+// is occupying, 0 if next instruction isn't float.
+func stepflt(pc *uint32, regs *[15]uint32) uint32 {
+ var (
+ i, opc, regd, regm, regn, cpsr uint32
+ cmp, delta int32
+ uval uint64
+ sval int64
+ nan, ok bool
+ )
+
+ // m is locked in vlop_arm.s, so g.m cannot change during this function call,
+ // so caching it in a local variable is safe.
+ m := getg().m
+ i = *pc
+
+ if fptrace > 0 {
+ print("stepflt ", pc, " ", hex(i), " (cpsr ", hex(regs[_CPSR]>>28), ")\n")
+ }
+
+ opc = i >> 28
+ if opc == 14 { // common case first
+ goto execute
+ }
+
+ cpsr = regs[_CPSR] >> 28
+ switch opc {
+ case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9:
+ if cpsr&(conditions[opc/2]>>4) == conditions[opc/2]>>4 &&
+ cpsr&(conditions[opc/2]&0xf) == 0 {
+ if opc&1 != 0 {
+ return 1
+ }
+ } else {
+ if opc&1 == 0 {
+ return 1
+ }
+ }
+
+ case 10, 11: // GE (N == V), LT (N != V)
+ if cpsr&(_FLAGS_N>>28) == cpsr&(_FLAGS_V>>28) {
+ if opc&1 != 0 {
+ return 1
+ }
+ } else {
+ if opc&1 == 0 {
+ return 1
+ }
+ }
+
+ case 12, 13: // GT (N == V and Z == 0), LE (N != V or Z == 1)
+ if cpsr&(_FLAGS_N>>28) == cpsr&(_FLAGS_V>>28) &&
+ cpsr&(_FLAGS_Z>>28) == 0 {
+ if opc&1 != 0 {
+ return 1
+ }
+ } else {
+ if opc&1 == 0 {
+ return 1
+ }
+ }
+
+ case 14: // AL
+ // ok
+
+ case 15: // shouldn't happen
+ return 0
+ }
+
+ if fptrace > 0 {
+ print("conditional ", hex(opc), " (cpsr ", hex(cpsr), ") pass\n")
+ }
+ i = 0xe<<28 | i&(1<<28-1)
+
+execute:
+ // special cases
+ if i&0xfffff000 == 0xe59fb000 {
+ // load r11 from pc-relative address.
+ // might be part of a floating point move
+ // (or might not, but no harm in simulating
+ // one instruction too many).
+ addr := (*[1]uint32)(add(unsafe.Pointer(pc), uintptr(i&0xfff+8)))
+ regs[11] = addr[0]
+
+ if fptrace > 0 {
+ print("*** cpu R[11] = *(", addr, ") ", hex(regs[11]), "\n")
+ }
+ return 1
+ }
+ if i == 0xe08bb00d {
+ // add sp to r11.
+ // might be part of a large stack offset address
+ // (or might not, but again no harm done).
+ regs[11] += regs[13]
+
+ if fptrace > 0 {
+ print("*** cpu R[11] += R[13] ", hex(regs[11]), "\n")
+ }
+ return 1
+ }
+ if i == 0xeef1fa10 {
+ regs[_CPSR] = regs[_CPSR]&0x0fffffff | m.fflag
+
+ if fptrace > 0 {
+ print("*** fpsr R[CPSR] = F[CPSR] ", hex(regs[_CPSR]), "\n")
+ }
+ return 1
+ }
+ if i&0xff000000 == 0xea000000 {
+ // unconditional branch
+ // can happen in the middle of floating point
+ // if the linker decides it is time to lay down
+ // a sequence of instruction stream constants.
+ delta = int32(i&0xffffff) << 8 >> 8 // sign extend
+
+ if fptrace > 0 {
+ print("*** cpu PC += ", hex((delta+2)*4), "\n")
+ }
+ return uint32(delta + 2)
+ }
+
+ goto stage1
+
+stage1: // load/store regn is cpureg, regm is 8bit offset
+ regd = i >> 12 & 0xf
+ regn = i >> 16 & 0xf
+ regm = i & 0xff << 2 // PLUS or MINUS ??
+
+ switch i & 0xfff00f00 {
+ default:
+ goto stage2
+
+ case 0xed900a00: // single load
+ uaddr := uintptr(regs[regn] + regm)
+ if uaddr < 4096 {
+ if fptrace > 0 {
+ print("*** load @", hex(uaddr), " => fault\n")
+ }
+ return _FAULT
+ }
+ addr := (*[1]uint32)(unsafe.Pointer(uaddr))
+ m.freglo[regd] = addr[0]
+
+ if fptrace > 0 {
+ print("*** load F[", regd, "] = ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xed900b00: // double load
+ uaddr := uintptr(regs[regn] + regm)
+ if uaddr < 4096 {
+ if fptrace > 0 {
+ print("*** double load @", hex(uaddr), " => fault\n")
+ }
+ return _FAULT
+ }
+ addr := (*[2]uint32)(unsafe.Pointer(uaddr))
+ m.freglo[regd] = addr[0]
+ m.freghi[regd] = addr[1]
+
+ if fptrace > 0 {
+ print("*** load D[", regd, "] = ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xed800a00: // single store
+ uaddr := uintptr(regs[regn] + regm)
+ if uaddr < 4096 {
+ if fptrace > 0 {
+ print("*** store @", hex(uaddr), " => fault\n")
+ }
+ return _FAULT
+ }
+ addr := (*[1]uint32)(unsafe.Pointer(uaddr))
+ addr[0] = m.freglo[regd]
+
+ if fptrace > 0 {
+ print("*** *(", addr, ") = ", hex(addr[0]), "\n")
+ }
+ break
+
+ case 0xed800b00: // double store
+ uaddr := uintptr(regs[regn] + regm)
+ if uaddr < 4096 {
+ if fptrace > 0 {
+ print("*** double store @", hex(uaddr), " => fault\n")
+ }
+ return _FAULT
+ }
+ addr := (*[2]uint32)(unsafe.Pointer(uaddr))
+ addr[0] = m.freglo[regd]
+ addr[1] = m.freghi[regd]
+
+ if fptrace > 0 {
+ print("*** *(", addr, ") = ", hex(addr[1]), "-", hex(addr[0]), "\n")
+ }
+ break
+ }
+ return 1
+
+stage2: // regd, regm, regn are 4bit variables
+ regm = i >> 0 & 0xf
+ switch i & 0xfff00ff0 {
+ default:
+ goto stage3
+
+ case 0xf3000110: // veor
+ m.freglo[regd] = m.freglo[regm] ^ m.freglo[regn]
+ m.freghi[regd] = m.freghi[regm] ^ m.freghi[regn]
+
+ if fptrace > 0 {
+ print("*** veor D[", regd, "] = ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb00b00: // D[regd] = const(regn,regm)
+ regn = regn<<4 | regm
+ regm = 0x40000000
+ if regn&0x80 != 0 {
+ regm |= 0x80000000
+ }
+ if regn&0x40 != 0 {
+ regm ^= 0x7fc00000
+ }
+ regm |= regn & 0x3f << 16
+ m.freglo[regd] = 0
+ m.freghi[regd] = regm
+
+ if fptrace > 0 {
+ print("*** immed D[", regd, "] = ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb00a00: // F[regd] = const(regn,regm)
+ regn = regn<<4 | regm
+ regm = 0x40000000
+ if regn&0x80 != 0 {
+ regm |= 0x80000000
+ }
+ if regn&0x40 != 0 {
+ regm ^= 0x7e000000
+ }
+ regm |= regn & 0x3f << 19
+ m.freglo[regd] = regm
+
+ if fptrace > 0 {
+ print("*** immed D[", regd, "] = ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee300b00: // D[regd] = D[regn]+D[regm]
+ fadd64c(fgetd(regn), fgetd(regm), &uval)
+ fputd(regd, uval)
+
+ if fptrace > 0 {
+ print("*** add D[", regd, "] = D[", regn, "]+D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee300a00: // F[regd] = F[regn]+F[regm]
+ fadd64c(f32to64(m.freglo[regn]), f32to64(m.freglo[regm]), &uval)
+ m.freglo[regd] = f64to32(uval)
+
+ if fptrace > 0 {
+ print("*** add F[", regd, "] = F[", regn, "]+F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee300b40: // D[regd] = D[regn]-D[regm]
+ fsub64c(fgetd(regn), fgetd(regm), &uval)
+ fputd(regd, uval)
+
+ if fptrace > 0 {
+ print("*** sub D[", regd, "] = D[", regn, "]-D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee300a40: // F[regd] = F[regn]-F[regm]
+ fsub64c(f32to64(m.freglo[regn]), f32to64(m.freglo[regm]), &uval)
+ m.freglo[regd] = f64to32(uval)
+
+ if fptrace > 0 {
+ print("*** sub F[", regd, "] = F[", regn, "]-F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee200b00: // D[regd] = D[regn]*D[regm]
+ fmul64c(fgetd(regn), fgetd(regm), &uval)
+ fputd(regd, uval)
+
+ if fptrace > 0 {
+ print("*** mul D[", regd, "] = D[", regn, "]*D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee200a00: // F[regd] = F[regn]*F[regm]
+ fmul64c(f32to64(m.freglo[regn]), f32to64(m.freglo[regm]), &uval)
+ m.freglo[regd] = f64to32(uval)
+
+ if fptrace > 0 {
+ print("*** mul F[", regd, "] = F[", regn, "]*F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee800b00: // D[regd] = D[regn]/D[regm]
+ fdiv64c(fgetd(regn), fgetd(regm), &uval)
+ fputd(regd, uval)
+
+ if fptrace > 0 {
+ print("*** div D[", regd, "] = D[", regn, "]/D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee800a00: // F[regd] = F[regn]/F[regm]
+ fdiv64c(f32to64(m.freglo[regn]), f32to64(m.freglo[regm]), &uval)
+ m.freglo[regd] = f64to32(uval)
+
+ if fptrace > 0 {
+ print("*** div F[", regd, "] = F[", regn, "]/F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xee000b10: // S[regn] = R[regd] (MOVW) (regm ignored)
+ m.freglo[regn] = regs[regd]
+
+ if fptrace > 0 {
+ print("*** cpy S[", regn, "] = R[", regd, "] ", hex(m.freglo[regn]), "\n")
+ }
+ break
+
+ case 0xee100b10: // R[regd] = S[regn] (MOVW) (regm ignored)
+ regs[regd] = m.freglo[regn]
+
+ if fptrace > 0 {
+ print("*** cpy R[", regd, "] = S[", regn, "] ", hex(regs[regd]), "\n")
+ }
+ break
+ }
+ return 1
+
+stage3: // regd, regm are 4bit variables
+ switch i & 0xffff0ff0 {
+ default:
+ goto done
+
+ case 0xeeb00a40: // F[regd] = F[regm] (MOVF)
+ m.freglo[regd] = m.freglo[regm]
+
+ if fptrace > 0 {
+ print("*** F[", regd, "] = F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb00b40: // D[regd] = D[regm] (MOVD)
+ m.freglo[regd] = m.freglo[regm]
+ m.freghi[regd] = m.freghi[regm]
+
+ if fptrace > 0 {
+ print("*** D[", regd, "] = D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb10bc0: // D[regd] = sqrt D[regm]
+ uval = float64bits(sqrt(float64frombits(fgetd(regm))))
+ fputd(regd, uval)
+
+ if fptrace > 0 {
+ print("*** D[", regd, "] = sqrt D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb00bc0: // D[regd] = abs D[regm]
+ m.freglo[regd] = m.freglo[regm]
+ m.freghi[regd] = m.freghi[regm] & (1<<31 - 1)
+
+ if fptrace > 0 {
+ print("*** D[", regd, "] = abs D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb00ac0: // F[regd] = abs F[regm]
+ m.freglo[regd] = m.freglo[regm] & (1<<31 - 1)
+
+ if fptrace > 0 {
+ print("*** F[", regd, "] = abs F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb40bc0: // D[regd] :: D[regm] (CMPD)
+ fcmp64c(fgetd(regd), fgetd(regm), &cmp, &nan)
+ m.fflag = fstatus(nan, cmp)
+
+ if fptrace > 0 {
+ print("*** cmp D[", regd, "]::D[", regm, "] ", hex(m.fflag), "\n")
+ }
+ break
+
+ case 0xeeb40ac0: // F[regd] :: F[regm] (CMPF)
+ fcmp64c(f32to64(m.freglo[regd]), f32to64(m.freglo[regm]), &cmp, &nan)
+ m.fflag = fstatus(nan, cmp)
+
+ if fptrace > 0 {
+ print("*** cmp F[", regd, "]::F[", regm, "] ", hex(m.fflag), "\n")
+ }
+ break
+
+ case 0xeeb70ac0: // D[regd] = F[regm] (MOVFD)
+ fputd(regd, f32to64(m.freglo[regm]))
+
+ if fptrace > 0 {
+ print("*** f2d D[", regd, "]=F[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb70bc0: // F[regd] = D[regm] (MOVDF)
+ m.freglo[regd] = f64to32(fgetd(regm))
+
+ if fptrace > 0 {
+ print("*** d2f F[", regd, "]=D[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeebd0ac0: // S[regd] = F[regm] (MOVFW)
+ f64tointc(f32to64(m.freglo[regm]), &sval, &ok)
+ if !ok || int64(int32(sval)) != sval {
+ sval = 0
+ }
+ m.freglo[regd] = uint32(sval)
+ if fptrace > 0 {
+ print("*** fix S[", regd, "]=F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeebc0ac0: // S[regd] = F[regm] (MOVFW.U)
+ f64tointc(f32to64(m.freglo[regm]), &sval, &ok)
+ if !ok || int64(uint32(sval)) != sval {
+ sval = 0
+ }
+ m.freglo[regd] = uint32(sval)
+
+ if fptrace > 0 {
+ print("*** fix unsigned S[", regd, "]=F[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeebd0bc0: // S[regd] = D[regm] (MOVDW)
+ f64tointc(fgetd(regm), &sval, &ok)
+ if !ok || int64(int32(sval)) != sval {
+ sval = 0
+ }
+ m.freglo[regd] = uint32(sval)
+
+ if fptrace > 0 {
+ print("*** fix S[", regd, "]=D[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeebc0bc0: // S[regd] = D[regm] (MOVDW.U)
+ f64tointc(fgetd(regm), &sval, &ok)
+ if !ok || int64(uint32(sval)) != sval {
+ sval = 0
+ }
+ m.freglo[regd] = uint32(sval)
+
+ if fptrace > 0 {
+ print("*** fix unsigned S[", regd, "]=D[", regm, "] ", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb80ac0: // D[regd] = S[regm] (MOVWF)
+ cmp = int32(m.freglo[regm])
+ if cmp < 0 {
+ fintto64c(int64(-cmp), &uval)
+ fputf(regd, f64to32(uval))
+ m.freglo[regd] ^= 0x80000000
+ } else {
+ fintto64c(int64(cmp), &uval)
+ fputf(regd, f64to32(uval))
+ }
+
+ if fptrace > 0 {
+ print("*** float D[", regd, "]=S[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb80a40: // D[regd] = S[regm] (MOVWF.U)
+ fintto64c(int64(m.freglo[regm]), &uval)
+ fputf(regd, f64to32(uval))
+
+ if fptrace > 0 {
+ print("*** float unsigned D[", regd, "]=S[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb80bc0: // D[regd] = S[regm] (MOVWD)
+ cmp = int32(m.freglo[regm])
+ if cmp < 0 {
+ fintto64c(int64(-cmp), &uval)
+ fputd(regd, uval)
+ m.freghi[regd] ^= 0x80000000
+ } else {
+ fintto64c(int64(cmp), &uval)
+ fputd(regd, uval)
+ }
+
+ if fptrace > 0 {
+ print("*** float D[", regd, "]=S[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+
+ case 0xeeb80b40: // D[regd] = S[regm] (MOVWD.U)
+ fintto64c(int64(m.freglo[regm]), &uval)
+ fputd(regd, uval)
+
+ if fptrace > 0 {
+ print("*** float unsigned D[", regd, "]=S[", regm, "] ", hex(m.freghi[regd]), "-", hex(m.freglo[regd]), "\n")
+ }
+ break
+ }
+ return 1
+
+done:
+ if i&0xff000000 == 0xee000000 ||
+ i&0xff000000 == 0xed000000 {
+ print("stepflt ", pc, " ", hex(i), "\n")
+ fabort()
+ }
+ return 0
+}
+
+//go:nosplit
+func _sfloat2(pc uint32, regs *[15]uint32) {
+ systemstack(func() {
+ pc = sfloat2(pc, regs)
+ })
+}
+
+func _sfloatpanic()
+
+func sfloat2(pc uint32, regs *[15]uint32) uint32 {
+ first := true
+ for {
+ skip := stepflt((*uint32)(unsafe.Pointer(uintptr(pc))), regs)
+ if skip == 0 {
+ break
+ }
+ first = false
+ if skip == _FAULT {
+ // Encountered bad address in store/load.
+ // Record signal information and return to assembly
+ // trampoline that fakes the call.
+ const SIGSEGV = 11
+ curg := getg().m.curg
+ curg.sig = SIGSEGV
+ curg.sigcode0 = 0
+ curg.sigcode1 = 0
+ curg.sigpc = uintptr(pc)
+ pc = uint32(funcPC(_sfloatpanic))
+ break
+ }
+ pc += 4 * uint32(skip)
+ }
+ if first {
+ print("sfloat2 ", pc, " ", hex(*(*uint32)(unsafe.Pointer(uintptr(pc)))), "\n")
+ fabort() // not ok to fail first instruction
+ }
+ return pc
+}
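
In the Go interpreter above, stepflt first decides whether a conditional instruction would execute at all: each byte of the conditions table packs the flags that must be set in its high nibble and the flags that must be clear in its low nibble, and odd opcodes are the negated forms (NE, CC, PL, VC, LS). A standalone sketch of that check (evalCond is illustrative; the flag constants mirror the ones in the file):

package main

import "fmt"

const (
	flagsN = 1 << 31
	flagsZ = 1 << 30
	flagsC = 1 << 29
	flagsV = 1 << 28
)

// Same packing as the runtime's conditions table: high nibble = flags that
// must be set, low nibble = flags that must be clear, one byte per opcode pair.
var conditions = [5]uint32{
	flagsZ>>24 | 0,          // EQ/NE
	flagsC>>24 | 0,          // CS/CC
	flagsN>>24 | 0,          // MI/PL
	flagsV>>24 | 0,          // VS/VC
	flagsC>>24 | flagsZ>>28, // HI/LS
}

// evalCond reports whether condition opcode opc (0..9) would execute for the
// given 4-bit NZCV nibble, mirroring the decision at the top of stepflt
// (which returns 1 to skip a conditional instruction that does not execute).
func evalCond(opc, cpsr uint32) bool {
	cond := conditions[opc/2]
	pass := cpsr&(cond>>4) == cond>>4 && cpsr&(cond&0xf) == 0
	if opc&1 != 0 { // odd opcodes are the negated forms
		return !pass
	}
	return pass
}

func main() {
	cpsr := uint32(flagsZ) >> 28                      // Z set
	fmt.Println(evalCond(0, cpsr), evalCond(1, cpsr)) // EQ=true NE=false
}
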
diff --git a/src/runtime/sqrt.go b/src/runtime/sqrt.go
index 34a8c3806..e3a27014b 100644
--- a/src/runtime/sqrt.go
+++ b/src/runtime/sqrt.go
@@ -86,9 +86,6 @@ import "unsafe"
// Notes: Rounding mode detection omitted.
const (
- uvnan = 0x7FF8000000000001
- uvinf = 0x7FF0000000000000
- uvneginf = 0xFFF0000000000000
mask = 0x7FF
shift = 64 - 11 - 1
bias = 1023
@@ -104,7 +101,7 @@ func sqrt(x float64) float64 {
case x == 0 || x != x || x > maxFloat64:
return x
case x < 0:
- return nan
+ return nan()
}
ix := float64bits(x)
// normalize x
@@ -144,7 +141,3 @@ func sqrt(x float64) float64 {
ix = q>>1 + uint64(exp-1+bias)<<shift // significand + biased exponent
return float64frombits(ix)
}
-
-func sqrtC(f float64, r *float64) {
- *r = sqrt(f)
-}
diff --git a/src/runtime/stack.c b/src/runtime/stack.c
deleted file mode 100644
index ffae73a2a..000000000
--- a/src/runtime/stack.c
+++ /dev/null
@@ -1,874 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "stack.h"
-#include "funcdata.h"
-#include "typekind.h"
-#include "type.h"
-#include "race.h"
-#include "mgc0.h"
-#include "textflag.h"
-
-enum
-{
- // StackDebug == 0: no logging
- // == 1: logging of per-stack operations
- // == 2: logging of per-frame operations
- // == 3: logging of per-word updates
- // == 4: logging of per-word reads
- StackDebug = 0,
- StackFromSystem = 0, // allocate stacks from system memory instead of the heap
- StackFaultOnFree = 0, // old stacks are mapped noaccess to detect use after free
- StackPoisonCopy = 0, // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
-
- StackCache = 1,
-};
-
-// Global pool of spans that have free stacks.
-// Stacks are assigned an order according to size.
-// order = log_2(size/FixedStack)
-// There is a free list for each order.
-MSpan runtime·stackpool[NumStackOrders];
-Mutex runtime·stackpoolmu;
-// TODO: one lock per order?
-
-static Stack stackfreequeue;
-
-void
-runtime·stackinit(void)
-{
- int32 i;
-
- if((StackCacheSize & PageMask) != 0)
- runtime·throw("cache size must be a multiple of page size");
-
- for(i = 0; i < NumStackOrders; i++)
- runtime·MSpanList_Init(&runtime·stackpool[i]);
-}
-
-// Allocates a stack from the free pool. Must be called with
-// stackpoolmu held.
-static MLink*
-poolalloc(uint8 order)
-{
- MSpan *list;
- MSpan *s;
- MLink *x;
- uintptr i;
-
- list = &runtime·stackpool[order];
- s = list->next;
- if(s == list) {
- // no free stacks. Allocate another span worth.
- s = runtime·MHeap_AllocStack(&runtime·mheap, StackCacheSize >> PageShift);
- if(s == nil)
- runtime·throw("out of memory");
- if(s->ref != 0)
- runtime·throw("bad ref");
- if(s->freelist != nil)
- runtime·throw("bad freelist");
- for(i = 0; i < StackCacheSize; i += FixedStack << order) {
- x = (MLink*)((s->start << PageShift) + i);
- x->next = s->freelist;
- s->freelist = x;
- }
- runtime·MSpanList_Insert(list, s);
- }
- x = s->freelist;
- if(x == nil)
- runtime·throw("span has no free stacks");
- s->freelist = x->next;
- s->ref++;
- if(s->freelist == nil) {
- // all stacks in s are allocated.
- runtime·MSpanList_Remove(s);
- }
- return x;
-}
-
-// Adds stack x to the free pool. Must be called with stackpoolmu held.
-static void
-poolfree(MLink *x, uint8 order)
-{
- MSpan *s;
-
- s = runtime·MHeap_Lookup(&runtime·mheap, x);
- if(s->state != MSpanStack)
- runtime·throw("freeing stack not in a stack span");
- if(s->freelist == nil) {
- // s will now have a free stack
- runtime·MSpanList_Insert(&runtime·stackpool[order], s);
- }
- x->next = s->freelist;
- s->freelist = x;
- s->ref--;
- if(s->ref == 0) {
- // span is completely free - return to heap
- runtime·MSpanList_Remove(s);
- s->freelist = nil;
- runtime·MHeap_FreeStack(&runtime·mheap, s);
- }
-}
-
-// stackcacherefill/stackcacherelease implement a global pool of stack segments.
-// The pool is required to prevent unlimited growth of per-thread caches.
-static void
-stackcacherefill(MCache *c, uint8 order)
-{
- MLink *x, *list;
- uintptr size;
-
- if(StackDebug >= 1)
- runtime·printf("stackcacherefill order=%d\n", order);
-
- // Grab some stacks from the global cache.
- // Grab half of the allowed capacity (to prevent thrashing).
- list = nil;
- size = 0;
- runtime·lock(&runtime·stackpoolmu);
- while(size < StackCacheSize/2) {
- x = poolalloc(order);
- x->next = list;
- list = x;
- size += FixedStack << order;
- }
- runtime·unlock(&runtime·stackpoolmu);
-
- c->stackcache[order].list = list;
- c->stackcache[order].size = size;
-}
-
-static void
-stackcacherelease(MCache *c, uint8 order)
-{
- MLink *x, *y;
- uintptr size;
-
- if(StackDebug >= 1)
- runtime·printf("stackcacherelease order=%d\n", order);
- x = c->stackcache[order].list;
- size = c->stackcache[order].size;
- runtime·lock(&runtime·stackpoolmu);
- while(size > StackCacheSize/2) {
- y = x->next;
- poolfree(x, order);
- x = y;
- size -= FixedStack << order;
- }
- runtime·unlock(&runtime·stackpoolmu);
- c->stackcache[order].list = x;
- c->stackcache[order].size = size;
-}
-
-void
-runtime·stackcache_clear(MCache *c)
-{
- uint8 order;
- MLink *x, *y;
-
- if(StackDebug >= 1)
- runtime·printf("stackcache clear\n");
- runtime·lock(&runtime·stackpoolmu);
- for(order = 0; order < NumStackOrders; order++) {
- x = c->stackcache[order].list;
- while(x != nil) {
- y = x->next;
- poolfree(x, order);
- x = y;
- }
- c->stackcache[order].list = nil;
- c->stackcache[order].size = 0;
- }
- runtime·unlock(&runtime·stackpoolmu);
-}
-
-Stack
-runtime·stackalloc(uint32 n)
-{
- uint8 order;
- uint32 n2;
- void *v;
- MLink *x;
- MSpan *s;
- MCache *c;
-
- // Stackalloc must be called on scheduler stack, so that we
- // never try to grow the stack during the code that stackalloc runs.
- // Doing so would cause a deadlock (issue 1547).
- if(g != g->m->g0)
- runtime·throw("stackalloc not on scheduler stack");
- if((n & (n-1)) != 0)
- runtime·throw("stack size not a power of 2");
- if(StackDebug >= 1)
- runtime·printf("stackalloc %d\n", n);
-
- if(runtime·debug.efence || StackFromSystem) {
- v = runtime·sysAlloc(ROUND(n, PageSize), &mstats.stacks_sys);
- if(v == nil)
- runtime·throw("out of memory (stackalloc)");
- return (Stack){(uintptr)v, (uintptr)v+n};
- }
-
- // Small stacks are allocated with a fixed-size free-list allocator.
- // If we need a stack of a bigger size, we fall back on allocating
- // a dedicated span.
- if(StackCache && n < FixedStack << NumStackOrders && n < StackCacheSize) {
- order = 0;
- n2 = n;
- while(n2 > FixedStack) {
- order++;
- n2 >>= 1;
- }
- c = g->m->mcache;
- if(c == nil || g->m->gcing || g->m->helpgc) {
- // c == nil can happen in the guts of exitsyscall or
- // procresize. Just get a stack from the global pool.
- // Also don't touch stackcache during gc
- // as it's flushed concurrently.
- runtime·lock(&runtime·stackpoolmu);
- x = poolalloc(order);
- runtime·unlock(&runtime·stackpoolmu);
- } else {
- x = c->stackcache[order].list;
- if(x == nil) {
- stackcacherefill(c, order);
- x = c->stackcache[order].list;
- }
- c->stackcache[order].list = x->next;
- c->stackcache[order].size -= n;
- }
- v = (byte*)x;
- } else {
- s = runtime·MHeap_AllocStack(&runtime·mheap, ROUND(n, PageSize) >> PageShift);
- if(s == nil)
- runtime·throw("out of memory");
- v = (byte*)(s->start<<PageShift);
- }
-
- if(raceenabled)
- runtime·racemalloc(v, n);
- if(StackDebug >= 1)
- runtime·printf(" allocated %p\n", v);
- return (Stack){(uintptr)v, (uintptr)v+n};
-}
-
-void
-runtime·stackfree(Stack stk)
-{
- uint8 order;
- uintptr n, n2;
- MSpan *s;
- MLink *x;
- MCache *c;
- void *v;
-
- n = stk.hi - stk.lo;
- v = (void*)stk.lo;
- if(n & (n-1))
- runtime·throw("stack not a power of 2");
- if(StackDebug >= 1) {
- runtime·printf("stackfree %p %d\n", v, (int32)n);
- runtime·memclr(v, n); // for testing, clobber stack data
- }
- if(runtime·debug.efence || StackFromSystem) {
- if(runtime·debug.efence || StackFaultOnFree)
- runtime·SysFault(v, n);
- else
- runtime·SysFree(v, n, &mstats.stacks_sys);
- return;
- }
- if(StackCache && n < FixedStack << NumStackOrders && n < StackCacheSize) {
- order = 0;
- n2 = n;
- while(n2 > FixedStack) {
- order++;
- n2 >>= 1;
- }
- x = (MLink*)v;
- c = g->m->mcache;
- if(c == nil || g->m->gcing || g->m->helpgc) {
- runtime·lock(&runtime·stackpoolmu);
- poolfree(x, order);
- runtime·unlock(&runtime·stackpoolmu);
- } else {
- if(c->stackcache[order].size >= StackCacheSize)
- stackcacherelease(c, order);
- x->next = c->stackcache[order].list;
- c->stackcache[order].list = x;
- c->stackcache[order].size += n;
- }
- } else {
- s = runtime·MHeap_Lookup(&runtime·mheap, v);
- if(s->state != MSpanStack) {
- runtime·printf("%p %p\n", s->start<<PageShift, v);
- runtime·throw("bad span state");
- }
- runtime·MHeap_FreeStack(&runtime·mheap, s);
- }
-}
-
-uintptr runtime·maxstacksize = 1<<20; // enough until runtime.main sets it for real
-
-static uint8*
-mapnames[] = {
- (uint8*)"---",
- (uint8*)"scalar",
- (uint8*)"ptr",
- (uint8*)"multi",
-};
-
-// Stack frame layout
-//
-// (x86)
-// +------------------+
-// | args from caller |
-// +------------------+ <- frame->argp
-// | return address |
-// +------------------+ <- frame->varp
-// | locals |
-// +------------------+
-// | args to callee |
-// +------------------+ <- frame->sp
-//
-// (arm)
-// +------------------+
-// | args from caller |
-// +------------------+ <- frame->argp
-// | caller's retaddr |
-// +------------------+ <- frame->varp
-// | locals |
-// +------------------+
-// | args to callee |
-// +------------------+
-// | return address |
-// +------------------+ <- frame->sp
-
-void runtime·main(void);
-void runtime·switchtoM(void(*)(void));
-
-typedef struct AdjustInfo AdjustInfo;
-struct AdjustInfo {
- Stack old;
- uintptr delta; // ptr distance from old to new stack (newbase - oldbase)
-};
-
-// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
-// If so, it rewrites *vpp to point into the new stack.
-static void
-adjustpointer(AdjustInfo *adjinfo, void *vpp)
-{
- byte **pp, *p;
-
- pp = vpp;
- p = *pp;
- if(StackDebug >= 4)
- runtime·printf(" %p:%p\n", pp, p);
- if(adjinfo->old.lo <= (uintptr)p && (uintptr)p < adjinfo->old.hi) {
- *pp = p + adjinfo->delta;
- if(StackDebug >= 3)
- runtime·printf(" adjust ptr %p: %p -> %p\n", pp, p, *pp);
- }
-}
-
-// bv describes the memory starting at address scanp.
-// Adjust any pointers contained therein.
-static void
-adjustpointers(byte **scanp, BitVector *bv, AdjustInfo *adjinfo, Func *f)
-{
- uintptr delta;
- int32 num, i;
- byte *p, *minp, *maxp;
-
- minp = (byte*)adjinfo->old.lo;
- maxp = (byte*)adjinfo->old.hi;
- delta = adjinfo->delta;
- num = bv->n / BitsPerPointer;
- for(i = 0; i < num; i++) {
- if(StackDebug >= 4)
- runtime·printf(" %p:%s:%p\n", &scanp[i], mapnames[bv->bytedata[i / (8 / BitsPerPointer)] >> (i * BitsPerPointer & 7) & 3], scanp[i]);
- switch(bv->bytedata[i / (8 / BitsPerPointer)] >> (i * BitsPerPointer & 7) & 3) {
- case BitsDead:
- if(runtime·debug.gcdead)
- scanp[i] = (byte*)PoisonStack;
- break;
- case BitsScalar:
- break;
- case BitsPointer:
- p = scanp[i];
- if(f != nil && (byte*)0 < p && (p < (byte*)PageSize && runtime·invalidptr || (uintptr)p == PoisonGC || (uintptr)p == PoisonStack)) {
- // Looks like a junk value in a pointer slot.
- // Live analysis wrong?
- g->m->traceback = 2;
- runtime·printf("runtime: bad pointer in frame %s at %p: %p\n", runtime·funcname(f), &scanp[i], p);
- runtime·throw("invalid stack pointer");
- }
- if(minp <= p && p < maxp) {
- if(StackDebug >= 3)
- runtime·printf("adjust ptr %p %s\n", p, runtime·funcname(f));
- scanp[i] = p + delta;
- }
- break;
- case BitsMultiWord:
- runtime·throw("adjustpointers: unexpected garbage collection bits");
- }
- }
-}
-
-// Note: the argument/return area is adjusted by the callee.
-static bool
-adjustframe(Stkframe *frame, void *arg)
-{
- AdjustInfo *adjinfo;
- Func *f;
- StackMap *stackmap;
- int32 pcdata;
- BitVector bv;
- uintptr targetpc, size, minsize;
-
- adjinfo = arg;
- targetpc = frame->continpc;
- if(targetpc == 0) {
- // Frame is dead.
- return true;
- }
- f = frame->fn;
- if(StackDebug >= 2)
- runtime·printf(" adjusting %s frame=[%p,%p] pc=%p continpc=%p\n", runtime·funcname(f), frame->sp, frame->fp, frame->pc, frame->continpc);
- if(f->entry == (uintptr)runtime·switchtoM) {
- // A special routine at the bottom of stack of a goroutine that does an onM call.
- // We will allow it to be copied even though we don't
- // have full GC info for it (because it is written in asm).
- return true;
- }
- if(targetpc != f->entry)
- targetpc--;
- pcdata = runtime·pcdatavalue(f, PCDATA_StackMapIndex, targetpc);
- if(pcdata == -1)
- pcdata = 0; // in prologue
-
- // Adjust local variables if stack frame has been allocated.
- size = frame->varp - frame->sp;
- if(thechar != '6' && thechar != '8')
- minsize = sizeof(uintptr);
- else
- minsize = 0;
- if(size > minsize) {
- stackmap = runtime·funcdata(f, FUNCDATA_LocalsPointerMaps);
- if(stackmap == nil || stackmap->n <= 0) {
- runtime·printf("runtime: frame %s untyped locals %p+%p\n", runtime·funcname(f), (byte*)(frame->varp-size), size);
- runtime·throw("missing stackmap");
- }
- // Locals bitmap information, scan just the pointers in locals.
- if(pcdata < 0 || pcdata >= stackmap->n) {
- // don't know where we are
- runtime·printf("runtime: pcdata is %d and %d locals stack map entries for %s (targetpc=%p)\n",
- pcdata, stackmap->n, runtime·funcname(f), targetpc);
- runtime·throw("bad symbol table");
- }
- bv = runtime·stackmapdata(stackmap, pcdata);
- size = (bv.n * PtrSize) / BitsPerPointer;
- if(StackDebug >= 3)
- runtime·printf(" locals\n");
- adjustpointers((byte**)(frame->varp - size), &bv, adjinfo, f);
- }
-
- // Adjust arguments.
- if(frame->arglen > 0) {
- if(frame->argmap != nil) {
- bv = *frame->argmap;
- } else {
- stackmap = runtime·funcdata(f, FUNCDATA_ArgsPointerMaps);
- if(stackmap == nil || stackmap->n <= 0) {
- runtime·printf("runtime: frame %s untyped args %p+%p\n", runtime·funcname(f), frame->argp, (uintptr)frame->arglen);
- runtime·throw("missing stackmap");
- }
- if(pcdata < 0 || pcdata >= stackmap->n) {
- // don't know where we are
- runtime·printf("runtime: pcdata is %d and %d args stack map entries for %s (targetpc=%p)\n",
- pcdata, stackmap->n, runtime·funcname(f), targetpc);
- runtime·throw("bad symbol table");
- }
- bv = runtime·stackmapdata(stackmap, pcdata);
- }
- if(StackDebug >= 3)
- runtime·printf(" args\n");
- adjustpointers((byte**)frame->argp, &bv, adjinfo, nil);
- }
-
- return true;
-}
-
-static void
-adjustctxt(G *gp, AdjustInfo *adjinfo)
-{
- adjustpointer(adjinfo, &gp->sched.ctxt);
-}
-
-static void
-adjustdefers(G *gp, AdjustInfo *adjinfo)
-{
- Defer *d;
- bool (*cb)(Stkframe*, void*);
-
- // Adjust defer argument blocks the same way we adjust active stack frames.
- cb = adjustframe;
- runtime·tracebackdefers(gp, &cb, adjinfo);
-
- // Adjust pointers in the Defer structs.
- // Defer structs themselves are never on the stack.
- for(d = gp->defer; d != nil; d = d->link) {
- adjustpointer(adjinfo, &d->fn);
- adjustpointer(adjinfo, &d->argp);
- adjustpointer(adjinfo, &d->panic);
- }
-}
-
-static void
-adjustpanics(G *gp, AdjustInfo *adjinfo)
-{
- // Panics are on stack and already adjusted.
- // Update pointer to head of list in G.
- adjustpointer(adjinfo, &gp->panic);
-}
-
-static void
-adjustsudogs(G *gp, AdjustInfo *adjinfo)
-{
- SudoG *s;
-
- // the data elements pointed to by a SudoG structure
- // might be in the stack.
- for(s = gp->waiting; s != nil; s = s->waitlink) {
- adjustpointer(adjinfo, &s->elem);
- adjustpointer(adjinfo, &s->selectdone);
- }
-}
-
-// Copies gp's stack to a new stack of a different size.
-// Caller must have changed gp status to Gcopystack.
-static void
-copystack(G *gp, uintptr newsize)
-{
- Stack old, new;
- uintptr used;
- AdjustInfo adjinfo;
- bool (*cb)(Stkframe*, void*);
- byte *p, *ep;
-
- if(gp->syscallsp != 0)
- runtime·throw("stack growth not allowed in system call");
- old = gp->stack;
- if(old.lo == 0)
- runtime·throw("nil stackbase");
- used = old.hi - gp->sched.sp;
-
- // allocate new stack
- new = runtime·stackalloc(newsize);
- if(StackPoisonCopy) {
- p = (byte*)new.lo;
- ep = (byte*)new.hi;
- while(p < ep)
- *p++ = 0xfd;
- }
-
- if(StackDebug >= 1)
- runtime·printf("copystack gp=%p [%p %p %p]/%d -> [%p %p %p]/%d\n", gp, old.lo, old.hi-used, old.hi, (int32)(old.hi-old.lo), new.lo, new.hi-used, new.hi, (int32)newsize);
-
- // adjust pointers in the to-be-copied frames
- adjinfo.old = old;
- adjinfo.delta = new.hi - old.hi;
- cb = adjustframe;
- runtime·gentraceback(~(uintptr)0, ~(uintptr)0, 0, gp, 0, nil, 0x7fffffff, &cb, &adjinfo, 0);
-
- // adjust other miscellaneous things that have pointers into stacks.
- adjustctxt(gp, &adjinfo);
- adjustdefers(gp, &adjinfo);
- adjustpanics(gp, &adjinfo);
- adjustsudogs(gp, &adjinfo);
-
- // copy the stack to the new location
- if(StackPoisonCopy) {
- p = (byte*)new.lo;
- ep = (byte*)new.hi;
- while(p < ep)
- *p++ = 0xfb;
- }
- runtime·memmove((byte*)new.hi - used, (byte*)old.hi - used, used);
-
- // Swap out old stack for new one
- gp->stack = new;
- gp->stackguard0 = new.lo + StackGuard; // NOTE: might clobber a preempt request
- gp->sched.sp = new.hi - used;
-
- // free old stack
- if(StackPoisonCopy) {
- p = (byte*)old.lo;
- ep = (byte*)old.hi;
- while(p < ep)
- *p++ = 0xfc;
- }
- if(newsize > old.hi-old.lo) {
- // growing, free stack immediately
- runtime·stackfree(old);
- } else {
- // shrinking, queue up free operation. We can't actually free the stack
- // just yet because we might run into the following situation:
- // 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
- // 2) The stack that pointer points to is shrunk
- // 3) The old stack is freed
- // 4) The containing span is marked free
- // 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
- // the pointer looks like a pointer into a free span.
- // By not freeing, we prevent step #4 until GC is done.
- runtime·lock(&runtime·stackpoolmu);
- *(Stack*)old.lo = stackfreequeue;
- stackfreequeue = old;
- runtime·unlock(&runtime·stackpoolmu);
- }
-}
-
-// round x up to a power of 2.
-int32
-runtime·round2(int32 x)
-{
- int32 s;
-
- s = 0;
- while((1 << s) < x)
- s++;
- return 1 << s;
-}
-
-// Called from runtime·morestack when more stack is needed.
-// Allocate larger stack and relocate to new stack.
-// Stack growth is multiplicative, for constant amortized cost.
-//
-// g->atomicstatus will be Grunning or Gscanrunning upon entry.
-// If the GC is trying to stop this g then it will set preemptscan to true.
-void
-runtime·newstack(void)
-{
- int32 oldsize, newsize;
- uint32 oldstatus;
- uintptr sp;
- G *gp;
- Gobuf morebuf;
-
- if(g->m->morebuf.g->stackguard0 == (uintptr)StackFork)
- runtime·throw("stack growth after fork");
- if(g->m->morebuf.g != g->m->curg) {
- runtime·printf("runtime: newstack called from g=%p\n"
- "\tm=%p m->curg=%p m->g0=%p m->gsignal=%p\n",
- g->m->morebuf.g, g->m, g->m->curg, g->m->g0, g->m->gsignal);
- morebuf = g->m->morebuf;
- runtime·traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g);
- runtime·throw("runtime: wrong goroutine in newstack");
- }
- if(g->m->curg->throwsplit)
- runtime·throw("runtime: stack split at bad time");
-
- // The goroutine must be executing in order to call newstack,
- // so it must be Grunning or Gscanrunning.
-
- gp = g->m->curg;
- morebuf = g->m->morebuf;
- g->m->morebuf.pc = (uintptr)nil;
- g->m->morebuf.lr = (uintptr)nil;
- g->m->morebuf.sp = (uintptr)nil;
- g->m->morebuf.g = (G*)nil;
-
- runtime·casgstatus(gp, Grunning, Gwaiting);
- gp->waitreason = runtime·gostringnocopy((byte*)"stack growth");
-
- runtime·rewindmorestack(&gp->sched);
-
- if(gp->stack.lo == 0)
- runtime·throw("missing stack in newstack");
- sp = gp->sched.sp;
- if(thechar == '6' || thechar == '8') {
- // The call to morestack cost a word.
- sp -= sizeof(uintreg);
- }
- if(StackDebug >= 1 || sp < gp->stack.lo) {
- runtime·printf("runtime: newstack sp=%p stack=[%p, %p]\n"
- "\tmorebuf={pc:%p sp:%p lr:%p}\n"
- "\tsched={pc:%p sp:%p lr:%p ctxt:%p}\n",
- sp, gp->stack.lo, gp->stack.hi,
- g->m->morebuf.pc, g->m->morebuf.sp, g->m->morebuf.lr,
- gp->sched.pc, gp->sched.sp, gp->sched.lr, gp->sched.ctxt);
- }
- if(sp < gp->stack.lo) {
- runtime·printf("runtime: gp=%p, gp->status=%d\n ", (void*)gp, runtime·readgstatus(gp));
- runtime·printf("runtime: split stack overflow: %p < %p\n", sp, gp->stack.lo);
- runtime·throw("runtime: split stack overflow");
- }
-
- if(gp->sched.ctxt != nil) {
- // morestack wrote sched.ctxt on its way in here,
- // without a write barrier. Run the write barrier now.
- // It is not possible to be preempted between then
- // and now, so it's okay.
- runtime·writebarrierptr_nostore(&gp->sched.ctxt, gp->sched.ctxt);
- }
-
- if(gp->stackguard0 == (uintptr)StackPreempt) {
- if(gp == g->m->g0)
- runtime·throw("runtime: preempt g0");
- if(g->m->p == nil && g->m->locks == 0)
- runtime·throw("runtime: g is running but p is not");
- if(gp->preemptscan) {
- runtime·gcphasework(gp);
- runtime·casgstatus(gp, Gwaiting, Grunning);
- gp->stackguard0 = gp->stack.lo + StackGuard;
- gp->preempt = false;
- gp->preemptscan = false; // Tells the GC premption was successful.
- runtime·gogo(&gp->sched); // never return
- }
-
- // Be conservative about where we preempt.
- // We are interested in preempting user Go code, not runtime code.
- if(g->m->locks || g->m->mallocing || g->m->gcing || g->m->p->status != Prunning) {
- // Let the goroutine keep running for now.
- // gp->preempt is set, so it will be preempted next time.
- gp->stackguard0 = gp->stack.lo + StackGuard;
- runtime·casgstatus(gp, Gwaiting, Grunning);
- runtime·gogo(&gp->sched); // never return
- }
- // Act like goroutine called runtime.Gosched.
- runtime·casgstatus(gp, Gwaiting, Grunning);
- runtime·gosched_m(gp); // never return
- }
-
- // Allocate a bigger segment and move the stack.
- oldsize = gp->stack.hi - gp->stack.lo;
- newsize = oldsize * 2;
- if(newsize > runtime·maxstacksize) {
- runtime·printf("runtime: goroutine stack exceeds %D-byte limit\n", (uint64)runtime·maxstacksize);
- runtime·throw("stack overflow");
- }
-
- oldstatus = runtime·readgstatus(gp);
- oldstatus &= ~Gscan;
- runtime·casgstatus(gp, oldstatus, Gcopystack); // oldstatus is Gwaiting or Grunnable
- // The concurrent GC will not scan the stack while we are doing the copy since
- // the gp is in a Gcopystack status.
- copystack(gp, newsize);
- if(StackDebug >= 1)
- runtime·printf("stack grow done\n");
- runtime·casgstatus(gp, Gcopystack, Grunning);
- runtime·gogo(&gp->sched);
-}
-
-#pragma textflag NOSPLIT
-void
-runtime·nilfunc(void)
-{
- *(byte*)0 = 0;
-}
-
-// adjust Gobuf as if it executed a call to fn
-// and then did an immediate gosave.
-void
-runtime·gostartcallfn(Gobuf *gobuf, FuncVal *fv)
-{
- void *fn;
-
- if(fv != nil)
- fn = fv->fn;
- else
- fn = runtime·nilfunc;
- runtime·gostartcall(gobuf, fn, fv);
-}
-
-// Maybe shrink the stack being used by gp.
-// Called at garbage collection time.
-void
-runtime·shrinkstack(G *gp)
-{
- uintptr used, oldsize, newsize;
- uint32 oldstatus;
-
- if(runtime·readgstatus(gp) == Gdead) {
- if(gp->stack.lo != 0) {
- // Free whole stack - it will get reallocated
- // if G is used again.
- runtime·stackfree(gp->stack);
- gp->stack.lo = 0;
- gp->stack.hi = 0;
- }
- return;
- }
- if(gp->stack.lo == 0)
- runtime·throw("missing stack in shrinkstack");
-
- oldsize = gp->stack.hi - gp->stack.lo;
- newsize = oldsize / 2;
- if(newsize < FixedStack)
- return; // don't shrink below the minimum-sized stack
- used = gp->stack.hi - gp->sched.sp;
- if(used >= oldsize / 4)
- return; // still using at least 1/4 of the segment.
-
- // We can't copy the stack if we're in a syscall.
- // The syscall might have pointers into the stack.
- if(gp->syscallsp != 0)
- return;
-
-#ifdef GOOS_windows
- if(gp->m != nil && gp->m->libcallsp != 0)
- return;
-#endif
- if(StackDebug > 0)
- runtime·printf("shrinking stack %D->%D\n", (uint64)oldsize, (uint64)newsize);
- // This is being done in a Gscan state and was initiated by the GC so no need to move to
- // the Gcopystate.
- // The world is stopped, so the goroutine must be Gwaiting or Grunnable,
- // and what it is is not changing underfoot.
-
- oldstatus = runtime·readgstatus(gp);
- oldstatus &= ~Gscan;
- if(oldstatus != Gwaiting && oldstatus != Grunnable)
- runtime·throw("status is not Gwaiting or Grunnable");
- runtime·casgstatus(gp, oldstatus, Gcopystack);
- copystack(gp, newsize);
- runtime·casgstatus(gp, Gcopystack, oldstatus);
- }
-
-// Do any delayed stack freeing that was queued up during GC.
-void
-runtime·shrinkfinish(void)
-{
- Stack s, t;
-
- runtime·lock(&runtime·stackpoolmu);
- s = stackfreequeue;
- stackfreequeue = (Stack){0,0};
- runtime·unlock(&runtime·stackpoolmu);
- while(s.lo != 0) {
- t = *(Stack*)s.lo;
- runtime·stackfree(s);
- s = t;
- }
-}
-
-static void badc(void);
-
-#pragma textflag NOSPLIT
-void
-runtime·morestackc(void)
-{
- void (*fn)(void);
-
- fn = badc;
- runtime·onM(&fn);
-}
-
-static void
-badc(void)
-{
- runtime·throw("attempt to execute C code on Go stack");
-}
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
deleted file mode 100644
index f1b7d32d2..000000000
--- a/src/runtime/stack.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- // Goroutine preemption request.
- // Stored into g->stackguard0 to cause split stack check failure.
- // Must be greater than any real sp.
- // 0xfffffade in hex.
- stackPreempt = ^uintptr(1313)
-)
diff --git a/src/runtime/stack.h b/src/runtime/stack.h
index f97dc4ed8..0099d05c2 100644
--- a/src/runtime/stack.h
+++ b/src/runtime/stack.h
@@ -2,117 +2,24 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-/*
-Stack layout parameters.
-Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
-
-The per-goroutine g->stackguard is set to point StackGuard bytes
-above the bottom of the stack. Each function compares its stack
-pointer against g->stackguard to check for overflow. To cut one
-instruction from the check sequence for functions with tiny frames,
-the stack is allowed to protrude StackSmall bytes below the stack
-guard. Functions with large frames don't bother with the check and
-always call morestack. The sequences are (for amd64, others are
-similar):
-
- guard = g->stackguard
- frame = function's stack frame size
- argsize = size of function arguments (call + return)
-
- stack frame size <= StackSmall:
- CMPQ guard, SP
- JHI 3(PC)
- MOVQ m->morearg, $(argsize << 32)
- CALL morestack(SB)
-
- stack frame size > StackSmall but < StackBig
- LEAQ (frame-StackSmall)(SP), R0
- CMPQ guard, R0
- JHI 3(PC)
- MOVQ m->morearg, $(argsize << 32)
- CALL morestack(SB)
-
- stack frame size >= StackBig:
- MOVQ m->morearg, $((argsize << 32) | frame)
- CALL morestack(SB)
-
-The bottom StackGuard - StackSmall bytes are important: there has
-to be enough room to execute functions that refuse to check for
-stack overflow, either because they need to be adjacent to the
-actual caller's frame (deferproc) or because they handle the imminent
-stack overflow (morestack).
-
-For example, deferproc might call malloc, which does one of the
-above checks (without allocating a full frame), which might trigger
-a call to morestack. This sequence needs to fit in the bottom
-section of the stack. On amd64, morestack's frame is 40 bytes, and
-deferproc's frame is 56 bytes. That fits well within the
-StackGuard - StackSmall bytes at the bottom.
-The linkers explore all possible call traces involving non-splitting
-functions to make sure that this limit cannot be violated.
- */
+// For the linkers. Must match Go definitions.
+// TODO(rsc): Share Go definitions with linkers directly.
enum {
- // StackSystem is a number of additional bytes to add
- // to each stack below the usual guard area for OS-specific
- // purposes like signal handling. Used on Windows and on
- // Plan 9 because they do not use a separate stack.
#ifdef GOOS_windows
StackSystem = 512 * sizeof(uintptr),
#else
#ifdef GOOS_plan9
- // The size of the note handler frame varies among architectures,
- // but 512 bytes should be enough for every implementation.
StackSystem = 512,
#else
StackSystem = 0,
#endif // Plan 9
#endif // Windows
- // The minimum size of stack used by Go code
- StackMin = 2048,
-
- // The minimum stack size to allocate.
- // The hackery here rounds FixedStack0 up to a power of 2.
- FixedStack0 = StackMin + StackSystem,
- FixedStack1 = FixedStack0 - 1,
- FixedStack2 = FixedStack1 | (FixedStack1 >> 1),
- FixedStack3 = FixedStack2 | (FixedStack2 >> 2),
- FixedStack4 = FixedStack3 | (FixedStack3 >> 4),
- FixedStack5 = FixedStack4 | (FixedStack4 >> 8),
- FixedStack6 = FixedStack5 | (FixedStack5 >> 16),
- FixedStack = FixedStack6 + 1,
-
- // Functions that need frames bigger than this use an extra
- // instruction to do the stack split check, to avoid overflow
- // in case SP - framesize wraps below zero.
- // This value can be no bigger than the size of the unmapped
- // space at zero.
StackBig = 4096,
-
- // The stack guard is a pointer this many bytes above the
- // bottom of the stack.
StackGuard = 512 + StackSystem,
-
- // After a stack split check the SP is allowed to be this
- // many bytes below the stack guard. This saves an instruction
- // in the checking sequence for tiny frames.
StackSmall = 128,
-
- // The maximum number of bytes that a chain of NOSPLIT
- // functions can use.
StackLimit = StackGuard - StackSystem - StackSmall,
};
-// Goroutine preemption request.
-// Stored into g->stackguard0 to cause split stack check failure.
-// Must be greater than any real sp.
-// 0xfffffade in hex.
#define StackPreempt ((uint64)-1314)
-/*c2go
-enum
-{
- StackPreempt = -1314,
-};
-*/
-#define StackFork ((uint64)-1234)
diff --git a/src/runtime/stack1.go b/src/runtime/stack1.go
new file mode 100644
index 000000000..963f4fa73
--- /dev/null
+++ b/src/runtime/stack1.go
@@ -0,0 +1,818 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+const (
+ // StackDebug == 0: no logging
+ // == 1: logging of per-stack operations
+ // == 2: logging of per-frame operations
+ // == 3: logging of per-word updates
+ // == 4: logging of per-word reads
+ stackDebug = 0
+ stackFromSystem = 0 // allocate stacks from system memory instead of the heap
+ stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
+ stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
+
+ stackCache = 1
+)
+
+const (
+ uintptrMask = 1<<(8*ptrSize) - 1
+ poisonGC = uintptrMask & 0xf969696969696969
+ poisonStack = uintptrMask & 0x6868686868686868
+
+ // Goroutine preemption request.
+ // Stored into g->stackguard0 to cause split stack check failure.
+ // Must be greater than any real sp.
+ // 0xfffffade in hex.
+ stackPreempt = uintptrMask & -1314
+
+ // Thread is forking.
+ // Stored into g->stackguard0 to cause split stack check failure.
+ // Must be greater than any real sp.
+ stackFork = uintptrMask & -1234
+)
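A quick worked example of the constants above: on a 32-bit platform uintptrMask is 0xffffffff, so masking the untyped constant -1314 gives exactly the 0xfffffade mentioned in the comment; on 64-bit the same expression yields 0xfffffffffffffade. A minimal sketch, assuming ptrSize = 4 purely for illustration:

package main

import "fmt"

func main() {
	// Mirror of the stackPreempt definition above, with ptrSize = 4
	// assumed (32-bit) so the printed value matches the comment.
	const ptrSize = 4
	const uintptrMask = 1<<(8*ptrSize) - 1
	const stackPreempt = uintptrMask & -1314
	fmt.Printf("%#x\n", uint64(stackPreempt)) // 0xfffffade
}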
+
+// Global pool of spans that have free stacks.
+// Stacks are assigned an order according to size.
+// order = log_2(size/FixedStack)
+// There is a free list for each order.
+// TODO: one lock per order?
+var stackpool [_NumStackOrders]mspan
+var stackpoolmu mutex
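To make the order formula in the comment above concrete: a stack of size FixedStack<<k lands in free list k. A minimal sketch, assuming a FixedStack of 2048 purely for illustration (the real _FixedStack is derived in stack2.go and is platform-dependent); the inner loop is the same one stackalloc and stackfree use:

package main

import "fmt"

func main() {
	const fixedStack = 2048 // assumed value, for illustration only
	for _, size := range []uintptr{2048, 4096, 8192, 16384} {
		order := 0
		for n := size; n > fixedStack; n >>= 1 {
			order++ // order = log2(size/fixedStack)
		}
		fmt.Println(size, "byte stack -> free-list order", order)
	}
}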
+
+var stackfreequeue stack
+
+func stackinit() {
+ if _StackCacheSize&_PageMask != 0 {
+ gothrow("cache size must be a multiple of page size")
+ }
+ for i := range stackpool {
+ mSpanList_Init(&stackpool[i])
+ }
+}
+
+// Allocates a stack from the free pool. Must be called with
+// stackpoolmu held.
+func stackpoolalloc(order uint8) *mlink {
+ list := &stackpool[order]
+ s := list.next
+ if s == list {
+ // no free stacks. Allocate another span worth.
+ s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
+ if s == nil {
+ gothrow("out of memory")
+ }
+ if s.ref != 0 {
+ gothrow("bad ref")
+ }
+ if s.freelist != nil {
+ gothrow("bad freelist")
+ }
+ for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
+ x := (*mlink)(unsafe.Pointer(uintptr(s.start)<<_PageShift + i))
+ x.next = s.freelist
+ s.freelist = x
+ }
+ mSpanList_Insert(list, s)
+ }
+ x := s.freelist
+ if x == nil {
+ gothrow("span has no free stacks")
+ }
+ s.freelist = x.next
+ s.ref++
+ if s.freelist == nil {
+ // all stacks in s are allocated.
+ mSpanList_Remove(s)
+ }
+ return x
+}
+
+// Adds stack x to the free pool. Must be called with stackpoolmu held.
+func stackpoolfree(x *mlink, order uint8) {
+ s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
+ if s.state != _MSpanStack {
+ gothrow("freeing stack not in a stack span")
+ }
+ if s.freelist == nil {
+ // s will now have a free stack
+ mSpanList_Insert(&stackpool[order], s)
+ }
+ x.next = s.freelist
+ s.freelist = x
+ s.ref--
+ if s.ref == 0 {
+ // span is completely free - return to heap
+ mSpanList_Remove(s)
+ s.freelist = nil
+ mHeap_FreeStack(&mheap_, s)
+ }
+}
+
+// stackcacherefill/stackcacherelease implement a global pool of stack segments.
+// The pool is required to prevent unlimited growth of per-thread caches.
+func stackcacherefill(c *mcache, order uint8) {
+ if stackDebug >= 1 {
+ print("stackcacherefill order=", order, "\n")
+ }
+
+ // Grab some stacks from the global cache.
+ // Grab half of the allowed capacity (to prevent thrashing).
+ var list *mlink
+ var size uintptr
+ lock(&stackpoolmu)
+ for size < _StackCacheSize/2 {
+ x := stackpoolalloc(order)
+ x.next = list
+ list = x
+ size += _FixedStack << order
+ }
+ unlock(&stackpoolmu)
+ c.stackcache[order].list = list
+ c.stackcache[order].size = size
+}
+
+func stackcacherelease(c *mcache, order uint8) {
+ if stackDebug >= 1 {
+ print("stackcacherelease order=", order, "\n")
+ }
+ x := c.stackcache[order].list
+ size := c.stackcache[order].size
+ lock(&stackpoolmu)
+ for size > _StackCacheSize/2 {
+ y := x.next
+ stackpoolfree(x, order)
+ x = y
+ size -= _FixedStack << order
+ }
+ unlock(&stackpoolmu)
+ c.stackcache[order].list = x
+ c.stackcache[order].size = size
+}
+
+func stackcache_clear(c *mcache) {
+ if stackDebug >= 1 {
+ print("stackcache clear\n")
+ }
+ lock(&stackpoolmu)
+ for order := uint8(0); order < _NumStackOrders; order++ {
+ x := c.stackcache[order].list
+ for x != nil {
+ y := x.next
+ stackpoolfree(x, order)
+ x = y
+ }
+ c.stackcache[order].list = nil
+ c.stackcache[order].size = 0
+ }
+ unlock(&stackpoolmu)
+}
+
+func stackalloc(n uint32) stack {
+ // Stackalloc must be called on scheduler stack, so that we
+ // never try to grow the stack during the code that stackalloc runs.
+ // Doing so would cause a deadlock (issue 1547).
+ thisg := getg()
+ if thisg != thisg.m.g0 {
+ gothrow("stackalloc not on scheduler stack")
+ }
+ if n&(n-1) != 0 {
+ gothrow("stack size not a power of 2")
+ }
+ if stackDebug >= 1 {
+ print("stackalloc ", n, "\n")
+ }
+
+ if debug.efence != 0 || stackFromSystem != 0 {
+ v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
+ if v == nil {
+ gothrow("out of memory (stackalloc)")
+ }
+ return stack{uintptr(v), uintptr(v) + uintptr(n)}
+ }
+
+ // Small stacks are allocated with a fixed-size free-list allocator.
+ // If we need a stack of a bigger size, we fall back on allocating
+ // a dedicated span.
+ var v unsafe.Pointer
+ if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ order := uint8(0)
+ n2 := n
+ for n2 > _FixedStack {
+ order++
+ n2 >>= 1
+ }
+ var x *mlink
+ c := thisg.m.mcache
+ if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
+ // c == nil can happen in the guts of exitsyscall or
+ // procresize. Just get a stack from the global pool.
+ // Also don't touch stackcache during gc
+ // as it's flushed concurrently.
+ lock(&stackpoolmu)
+ x = stackpoolalloc(order)
+ unlock(&stackpoolmu)
+ } else {
+ x = c.stackcache[order].list
+ if x == nil {
+ stackcacherefill(c, order)
+ x = c.stackcache[order].list
+ }
+ c.stackcache[order].list = x.next
+ c.stackcache[order].size -= uintptr(n)
+ }
+ v = (unsafe.Pointer)(x)
+ } else {
+ s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
+ if s == nil {
+ gothrow("out of memory")
+ }
+ v = (unsafe.Pointer)(s.start << _PageShift)
+ }
+
+ if raceenabled {
+ racemalloc(v, uintptr(n))
+ }
+ if stackDebug >= 1 {
+ print(" allocated ", v, "\n")
+ }
+ return stack{uintptr(v), uintptr(v) + uintptr(n)}
+}
+
+func stackfree(stk stack) {
+ gp := getg()
+ n := stk.hi - stk.lo
+ v := (unsafe.Pointer)(stk.lo)
+ if n&(n-1) != 0 {
+ gothrow("stack not a power of 2")
+ }
+ if stackDebug >= 1 {
+ println("stackfree", v, n)
+ memclr(v, n) // for testing, clobber stack data
+ }
+ if debug.efence != 0 || stackFromSystem != 0 {
+ if debug.efence != 0 || stackFaultOnFree != 0 {
+ sysFault(v, n)
+ } else {
+ sysFree(v, n, &memstats.stacks_sys)
+ }
+ return
+ }
+ if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ order := uint8(0)
+ n2 := n
+ for n2 > _FixedStack {
+ order++
+ n2 >>= 1
+ }
+ x := (*mlink)(v)
+ c := gp.m.mcache
+ if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
+ lock(&stackpoolmu)
+ stackpoolfree(x, order)
+ unlock(&stackpoolmu)
+ } else {
+ if c.stackcache[order].size >= _StackCacheSize {
+ stackcacherelease(c, order)
+ }
+ x.next = c.stackcache[order].list
+ c.stackcache[order].list = x
+ c.stackcache[order].size += n
+ }
+ } else {
+ s := mHeap_Lookup(&mheap_, v)
+ if s.state != _MSpanStack {
+ println(hex(s.start<<_PageShift), v)
+ gothrow("bad span state")
+ }
+ mHeap_FreeStack(&mheap_, s)
+ }
+}
+
+var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
+
+var mapnames = []string{
+ _BitsDead: "---",
+ _BitsScalar: "scalar",
+ _BitsPointer: "ptr",
+}
+
+// Stack frame layout
+//
+// (x86)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | return address |
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+ <- frame->sp
+//
+// (arm)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | caller's retaddr |
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+
+// | return address |
+// +------------------+ <- frame->sp
+
+type adjustinfo struct {
+ old stack
+ delta uintptr // ptr distance from old to new stack (newbase - oldbase)
+}
+
+// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
+// If so, it rewrites *vpp to point into the new stack.
+func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
+ pp := (*unsafe.Pointer)(vpp)
+ p := *pp
+ if stackDebug >= 4 {
+ print(" ", pp, ":", p, "\n")
+ }
+ if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
+ *pp = add(p, adjinfo.delta)
+ if stackDebug >= 3 {
+ print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
+ }
+ }
+}
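A worked example of the rewrite above, with invented addresses: copystack sets delta = new.hi - old.hi, so a pointer into the old stack keeps its distance from the top of the stack after adjustment.

package main

import "fmt"

func main() {
	// Hypothetical stacks: old [0x1000, 0x2000), new [0x4000, 0x6000).
	oldLo, oldHi := uintptr(0x1000), uintptr(0x2000)
	newHi := uintptr(0x6000)
	delta := newHi - oldHi // 0x4000, as computed in copystack

	p := uintptr(0x1ff0) // a pointer 0x10 bytes below old.hi
	if oldLo <= p && p < oldHi {
		p += delta // the same rewrite adjustpointer performs on *vpp
	}
	fmt.Printf("%#x\n", p) // 0x5ff0: still 0x10 bytes below new.hi
}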
+
+type gobitvector struct {
+ n uintptr
+ bytedata []uint8
+}
+
+func gobv(bv bitvector) gobitvector {
+ return gobitvector{
+ uintptr(bv.n),
+ (*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
+ }
+}
+
+func ptrbits(bv *gobitvector, i uintptr) uint8 {
+ return (bv.bytedata[i/4] >> ((i & 3) * 2)) & 3
+}
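The bitmap packs one 2-bit class per stack word, four words to a byte, which is what the i/4 byte index and (i&3)*2 shift above extract. A small sketch with an invented bitmap byte, assuming the usual encoding _BitsDead=0, _BitsScalar=1, _BitsPointer=2:

package main

import "fmt"

func main() {
	// 0x61 = 0b01_10_00_01, read from the low bits up:
	// word 0 = 01 (scalar), word 1 = 00 (dead), word 2 = 10 (pointer), word 3 = 01 (scalar).
	bytedata := []uint8{0x61}
	ptrbits := func(i uintptr) uint8 {
		return (bytedata[i/4] >> ((i & 3) * 2)) & 3
	}
	names := []string{"dead", "scalar", "pointer", "multi"}
	for i := uintptr(0); i < 4; i++ {
		fmt.Println("word", i, "=", names[ptrbits(i)])
	}
}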
+
+// bv describes the memory starting at address scanp.
+// Adjust any pointers contained therein.
+func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
+ bv := gobv(*cbv)
+ minp := adjinfo.old.lo
+ maxp := adjinfo.old.hi
+ delta := adjinfo.delta
+ num := uintptr(bv.n / _BitsPerPointer)
+ for i := uintptr(0); i < num; i++ {
+ if stackDebug >= 4 {
+ print(" ", add(scanp, i*ptrSize), ":", mapnames[ptrbits(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/4], "\n")
+ }
+ switch ptrbits(&bv, i) {
+ default:
+ gothrow("unexpected pointer bits")
+ case _BitsDead:
+ if debug.gcdead != 0 {
+ *(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
+ }
+ case _BitsScalar:
+ // ok
+ case _BitsPointer:
+ p := *(*unsafe.Pointer)(add(scanp, i*ptrSize))
+ up := uintptr(p)
+ if f != nil && 0 < up && up < _PageSize && invalidptr != 0 || up == poisonGC || up == poisonStack {
+ // Looks like a junk value in a pointer slot.
+ // Live analysis wrong?
+ getg().m.traceback = 2
+ print("runtime: bad pointer in frame ", gofuncname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
+ gothrow("invalid stack pointer")
+ }
+ if minp <= up && up < maxp {
+ if stackDebug >= 3 {
+ print("adjust ptr ", p, " ", gofuncname(f), "\n")
+ }
+ *(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(up + delta)
+ }
+ }
+ }
+}
+
+// Note: the argument/return area is adjusted by the callee.
+func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
+ adjinfo := (*adjustinfo)(arg)
+ targetpc := frame.continpc
+ if targetpc == 0 {
+ // Frame is dead.
+ return true
+ }
+ f := frame.fn
+ if stackDebug >= 2 {
+ print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
+ }
+ if f.entry == systemstack_switchPC {
+		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
+ // We will allow it to be copied even though we don't
+ // have full GC info for it (because it is written in asm).
+ return true
+ }
+ if targetpc != f.entry {
+ targetpc--
+ }
+ pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
+ if pcdata == -1 {
+ pcdata = 0 // in prologue
+ }
+
+ // Adjust local variables if stack frame has been allocated.
+ size := frame.varp - frame.sp
+ var minsize uintptr
+ if thechar != '6' && thechar != '8' {
+ minsize = ptrSize
+ } else {
+ minsize = 0
+ }
+ if size > minsize {
+ var bv bitvector
+ stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+ if stackmap == nil || stackmap.n <= 0 {
+ print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
+ gothrow("missing stackmap")
+ }
+ // Locals bitmap information, scan just the pointers in locals.
+ if pcdata < 0 || pcdata >= stackmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
+ gothrow("bad symbol table")
+ }
+ bv = stackmapdata(stackmap, pcdata)
+ size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
+ if stackDebug >= 3 {
+ print(" locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
+ }
+ adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
+ }
+
+ // Adjust arguments.
+ if frame.arglen > 0 {
+ var bv bitvector
+ if frame.argmap != nil {
+ bv = *frame.argmap
+ } else {
+ stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
+ if stackmap == nil || stackmap.n <= 0 {
+ print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
+ gothrow("missing stackmap")
+ }
+ if pcdata < 0 || pcdata >= stackmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
+ gothrow("bad symbol table")
+ }
+ bv = stackmapdata(stackmap, pcdata)
+ }
+ if stackDebug >= 3 {
+ print(" args\n")
+ }
+ adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
+ }
+ return true
+}
+
+func adjustctxt(gp *g, adjinfo *adjustinfo) {
+ adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
+}
+
+func adjustdefers(gp *g, adjinfo *adjustinfo) {
+ // Adjust defer argument blocks the same way we adjust active stack frames.
+ tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
+
+ // Adjust pointers in the Defer structs.
+ // Defer structs themselves are never on the stack.
+ for d := gp._defer; d != nil; d = d.link {
+ adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
+ adjustpointer(adjinfo, (unsafe.Pointer)(&d.argp))
+ adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
+ }
+}
+
+func adjustpanics(gp *g, adjinfo *adjustinfo) {
+ // Panics are on stack and already adjusted.
+ // Update pointer to head of list in G.
+ adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
+}
+
+func adjustsudogs(gp *g, adjinfo *adjustinfo) {
+ // the data elements pointed to by a SudoG structure
+ // might be in the stack.
+ for s := gp.waiting; s != nil; s = s.waitlink {
+ adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
+ adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
+ }
+}
+
+func fillstack(stk stack, b byte) {
+ for p := stk.lo; p < stk.hi; p++ {
+ *(*byte)(unsafe.Pointer(p)) = b
+ }
+}
+
+// Copies gp's stack to a new stack of a different size.
+// Caller must have changed gp status to Gcopystack.
+func copystack(gp *g, newsize uintptr) {
+ if gp.syscallsp != 0 {
+ gothrow("stack growth not allowed in system call")
+ }
+ old := gp.stack
+ if old.lo == 0 {
+ gothrow("nil stackbase")
+ }
+ used := old.hi - gp.sched.sp
+
+ // allocate new stack
+ new := stackalloc(uint32(newsize))
+ if stackPoisonCopy != 0 {
+ fillstack(new, 0xfd)
+ }
+ if stackDebug >= 1 {
+ print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", old.hi-old.lo, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
+ }
+
+ // adjust pointers in the to-be-copied frames
+ var adjinfo adjustinfo
+ adjinfo.old = old
+ adjinfo.delta = new.hi - old.hi
+ gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
+
+ // adjust other miscellaneous things that have pointers into stacks.
+ adjustctxt(gp, &adjinfo)
+ adjustdefers(gp, &adjinfo)
+ adjustpanics(gp, &adjinfo)
+ adjustsudogs(gp, &adjinfo)
+
+ // copy the stack to the new location
+ if stackPoisonCopy != 0 {
+ fillstack(new, 0xfb)
+ }
+ memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
+
+ // Swap out old stack for new one
+ gp.stack = new
+ gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
+ gp.sched.sp = new.hi - used
+
+ // free old stack
+ if stackPoisonCopy != 0 {
+ fillstack(old, 0xfc)
+ }
+ if newsize > old.hi-old.lo {
+ // growing, free stack immediately
+ stackfree(old)
+ } else {
+ // shrinking, queue up free operation. We can't actually free the stack
+ // just yet because we might run into the following situation:
+ // 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
+ // 2) The stack that pointer points to is shrunk
+ // 3) The old stack is freed
+ // 4) The containing span is marked free
+ // 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
+ // the pointer looks like a pointer into a free span.
+ // By not freeing, we prevent step #4 until GC is done.
+ lock(&stackpoolmu)
+ *(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
+ stackfreequeue = old
+ unlock(&stackpoolmu)
+ }
+}
+
+// round x up to a power of 2.
+func round2(x int32) int32 {
+ s := uint(0)
+ for 1<<s < x {
+ s++
+ }
+ return 1 << s
+}
+
+// Called from runtime·morestack when more stack is needed.
+// Allocate larger stack and relocate to new stack.
+// Stack growth is multiplicative, for constant amortized cost.
+//
+// g->atomicstatus will be Grunning or Gscanrunning upon entry.
+// If the GC is trying to stop this g then it will set preemptscan to true.
+func newstack() {
+ thisg := getg()
+	// TODO: double-check all uses of gp below; none of them should be getg().
+ if thisg.m.morebuf.g.stackguard0 == stackFork {
+ gothrow("stack growth after fork")
+ }
+ if thisg.m.morebuf.g != thisg.m.curg {
+ print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
+ morebuf := thisg.m.morebuf
+ traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g)
+ gothrow("runtime: wrong goroutine in newstack")
+ }
+ if thisg.m.curg.throwsplit {
+ gp := thisg.m.curg
+ // Update syscallsp, syscallpc in case traceback uses them.
+ morebuf := thisg.m.morebuf
+ gp.syscallsp = morebuf.sp
+ gp.syscallpc = morebuf.pc
+ print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
+ "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
+ "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
+ gothrow("runtime: stack split at bad time")
+ }
+
+ // The goroutine must be executing in order to call newstack,
+ // so it must be Grunning or Gscanrunning.
+
+ gp := thisg.m.curg
+ morebuf := thisg.m.morebuf
+ thisg.m.morebuf.pc = 0
+ thisg.m.morebuf.lr = 0
+ thisg.m.morebuf.sp = 0
+ thisg.m.morebuf.g = nil
+
+ casgstatus(gp, _Grunning, _Gwaiting)
+ gp.waitreason = "stack growth"
+
+ rewindmorestack(&gp.sched)
+
+ if gp.stack.lo == 0 {
+ gothrow("missing stack in newstack")
+ }
+ sp := gp.sched.sp
+ if thechar == '6' || thechar == '8' {
+ // The call to morestack cost a word.
+ sp -= ptrSize
+ }
+ if stackDebug >= 1 || sp < gp.stack.lo {
+ print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
+ "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
+ "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
+ }
+ if sp < gp.stack.lo {
+ print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
+ print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
+ gothrow("runtime: split stack overflow")
+ }
+
+ if gp.sched.ctxt != nil {
+ // morestack wrote sched.ctxt on its way in here,
+ // without a write barrier. Run the write barrier now.
+ // It is not possible to be preempted between then
+ // and now, so it's okay.
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
+ }
+
+ if gp.stackguard0 == stackPreempt {
+ if gp == thisg.m.g0 {
+ gothrow("runtime: preempt g0")
+ }
+ if thisg.m.p == nil && thisg.m.locks == 0 {
+ gothrow("runtime: g is running but p is not")
+ }
+ if gp.preemptscan {
+ gcphasework(gp)
+ casgstatus(gp, _Gwaiting, _Grunning)
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ gp.preempt = false
+			gp.preemptscan = false // Tells the GC that preemption was successful.
+ gogo(&gp.sched) // never return
+ }
+
+ // Be conservative about where we preempt.
+ // We are interested in preempting user Go code, not runtime code.
+ if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
+ // Let the goroutine keep running for now.
+ // gp->preempt is set, so it will be preempted next time.
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ casgstatus(gp, _Gwaiting, _Grunning)
+ gogo(&gp.sched) // never return
+ }
+
+ // Act like goroutine called runtime.Gosched.
+ casgstatus(gp, _Gwaiting, _Grunning)
+ gosched_m(gp) // never return
+ }
+
+ // Allocate a bigger segment and move the stack.
+ oldsize := int(gp.stack.hi - gp.stack.lo)
+ newsize := oldsize * 2
+ if uintptr(newsize) > maxstacksize {
+ print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
+ gothrow("stack overflow")
+ }
+
+ oldstatus := readgstatus(gp)
+ oldstatus &^= _Gscan
+ casgstatus(gp, oldstatus, _Gcopystack) // oldstatus is Gwaiting or Grunnable
+
+ // The concurrent GC will not scan the stack while we are doing the copy since
+ // the gp is in a Gcopystack status.
+ copystack(gp, uintptr(newsize))
+ if stackDebug >= 1 {
+ print("stack grow done\n")
+ }
+ casgstatus(gp, _Gcopystack, _Grunning)
+ gogo(&gp.sched)
+}
+
+//go:nosplit
+func nilfunc() {
+ *(*uint8)(nil) = 0
+}
+
+// adjust Gobuf as if it executed a call to fn
+// and then did an immediate gosave.
+func gostartcallfn(gobuf *gobuf, fv *funcval) {
+ var fn unsafe.Pointer
+ if fv != nil {
+ fn = (unsafe.Pointer)(fv.fn)
+ } else {
+ fn = unsafe.Pointer(funcPC(nilfunc))
+ }
+ gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
+}
+
+// Maybe shrink the stack being used by gp.
+// Called at garbage collection time.
+func shrinkstack(gp *g) {
+ if readgstatus(gp) == _Gdead {
+ if gp.stack.lo != 0 {
+ // Free whole stack - it will get reallocated
+ // if G is used again.
+ stackfree(gp.stack)
+ gp.stack.lo = 0
+ gp.stack.hi = 0
+ }
+ return
+ }
+ if gp.stack.lo == 0 {
+ gothrow("missing stack in shrinkstack")
+ }
+
+ oldsize := gp.stack.hi - gp.stack.lo
+ newsize := oldsize / 2
+ if newsize < _FixedStack {
+ return // don't shrink below the minimum-sized stack
+ }
+ used := gp.stack.hi - gp.sched.sp
+ if used >= oldsize/4 {
+ return // still using at least 1/4 of the segment.
+ }
+
+ // We can't copy the stack if we're in a syscall.
+ // The syscall might have pointers into the stack.
+ if gp.syscallsp != 0 {
+ return
+ }
+ if _Windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
+ return
+ }
+
+ if stackDebug > 0 {
+ print("shrinking stack ", oldsize, "->", newsize, "\n")
+ }
+
+	// This is being done in a Gscan state and was initiated by the GC, so there is no need to move to
+	// the Gcopystack state.
+	// The world is stopped, so the goroutine must be Gwaiting or Grunnable,
+	// and its status is not changing underfoot.
+ oldstatus := readgstatus(gp) &^ _Gscan
+ if oldstatus != _Gwaiting && oldstatus != _Grunnable {
+ gothrow("status is not Gwaiting or Grunnable")
+ }
+ casgstatus(gp, oldstatus, _Gcopystack)
+ copystack(gp, newsize)
+ casgstatus(gp, _Gcopystack, oldstatus)
+}
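The shrink heuristic above halves the stack only when less than a quarter of it is in use and the halved size stays at or above the minimum stack. With invented numbers, and a minimum of 2048 bytes assumed only for illustration:

package main

import "fmt"

func main() {
	const fixedStack = 2048      // assumed minimum, for illustration only
	oldsize := uintptr(32 << 10) // a 32 KB stack
	used := uintptr(6 << 10)     // with 6 KB in use

	newsize := oldsize / 2
	switch {
	case newsize < fixedStack:
		fmt.Println("keep: already at the minimum size")
	case used >= oldsize/4:
		fmt.Println("keep: at least a quarter of the stack is in use")
	default:
		fmt.Println("shrink to", newsize, "bytes") // 16384
	}
}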
+
+// Do any delayed stack freeing that was queued up during GC.
+func shrinkfinish() {
+ lock(&stackpoolmu)
+ s := stackfreequeue
+ stackfreequeue = stack{}
+ unlock(&stackpoolmu)
+ for s.lo != 0 {
+ t := *(*stack)(unsafe.Pointer(s.lo))
+ stackfree(s)
+ s = t
+ }
+}
+
+//go:nosplit
+func morestackc() {
+ systemstack(func() {
+ gothrow("attempt to execute C code on Go stack")
+ })
+}
diff --git a/src/runtime/stack2.go b/src/runtime/stack2.go
new file mode 100644
index 000000000..c3718c205
--- /dev/null
+++ b/src/runtime/stack2.go
@@ -0,0 +1,106 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+/*
+Stack layout parameters.
+Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
+
+The per-goroutine g->stackguard is set to point StackGuard bytes
+above the bottom of the stack. Each function compares its stack
+pointer against g->stackguard to check for overflow. To cut one
+instruction from the check sequence for functions with tiny frames,
+the stack is allowed to protrude StackSmall bytes below the stack
+guard. Functions with large frames don't bother with the check and
+always call morestack. The sequences are (for amd64, others are
+similar):
+
+ guard = g->stackguard
+ frame = function's stack frame size
+ argsize = size of function arguments (call + return)
+
+ stack frame size <= StackSmall:
+ CMPQ guard, SP
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size > StackSmall but < StackBig
+ LEAQ (frame-StackSmall)(SP), R0
+ CMPQ guard, R0
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size >= StackBig:
+ MOVQ m->morearg, $((argsize << 32) | frame)
+ CALL morestack(SB)
+
+The bottom StackGuard - StackSmall bytes are important: there has
+to be enough room to execute functions that refuse to check for
+stack overflow, either because they need to be adjacent to the
+actual caller's frame (deferproc) or because they handle the imminent
+stack overflow (morestack).
+
+For example, deferproc might call malloc, which does one of the
+above checks (without allocating a full frame), which might trigger
+a call to morestack. This sequence needs to fit in the bottom
+section of the stack. On amd64, morestack's frame is 40 bytes, and
+deferproc's frame is 56 bytes. That fits well within the
+StackGuard - StackSmall bytes at the bottom.
+The linkers explore all possible call traces involving non-splitting
+functions to make sure that this limit cannot be violated.
+*/
+
+const (
+ // StackSystem is a number of additional bytes to add
+ // to each stack below the usual guard area for OS-specific
+ // purposes like signal handling. Used on Windows and on
+ // Plan 9 because they do not use a separate stack.
+ _StackSystem = _Windows*512*ptrSize + _Plan9*512
+
+ // The minimum size of stack used by Go code
+ _StackMin = 2048
+
+ // The minimum stack size to allocate.
+ // The hackery here rounds FixedStack0 up to a power of 2.
+ _FixedStack0 = _StackMin + _StackSystem
+ _FixedStack1 = _FixedStack0 - 1
+ _FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
+ _FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
+ _FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
+ _FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
+ _FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
+ _FixedStack = _FixedStack6 + 1
+
+ // Functions that need frames bigger than this use an extra
+ // instruction to do the stack split check, to avoid overflow
+ // in case SP - framesize wraps below zero.
+ // This value can be no bigger than the size of the unmapped
+ // space at zero.
+ _StackBig = 4096
+
+ // The stack guard is a pointer this many bytes above the
+ // bottom of the stack.
+ _StackGuard = 512 + _StackSystem
+
+ // After a stack split check the SP is allowed to be this
+ // many bytes below the stack guard. This saves an instruction
+ // in the checking sequence for tiny frames.
+ _StackSmall = 128
+
+ // The maximum number of bytes that a chain of NOSPLIT
+ // functions can use.
+ _StackLimit = _StackGuard - _StackSystem - _StackSmall
+)
+
+// Goroutine preemption request.
+// Stored into g->stackguard0 to cause split stack check failure.
+// Must be greater than any real sp.
+// 0xfffffade in hex.
+const (
+ _StackPreempt = uintptrMask & -1314
+ _StackFork = uintptrMask & -1234
+)
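
The _FixedStack0.._FixedStack chain above is the usual round-up-to-a-power-of-two trick: subtract one, smear the highest set bit rightward, add one back. A small standalone sketch of the same arithmetic (the function name is made up; the constants below assume _StackSystem of 0 and 512 respectively):

    package main

    import "fmt"

    // roundPow2 rounds n up to the next power of two using the same
    // bit-smearing sequence as the _FixedStack constants.
    func roundPow2(n uint32) uint32 {
    	n--
    	n |= n >> 1
    	n |= n >> 2
    	n |= n >> 4
    	n |= n >> 8
    	n |= n >> 16
    	return n + 1
    }

    func main() {
    	fmt.Println(roundPow2(2048))       // 2048: StackSystem = 0, already a power of two
    	fmt.Println(roundPow2(2048 + 512)) // 4096: e.g. Plan 9's 512-byte StackSystem
    }
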
diff --git a/src/runtime/string.c b/src/runtime/string.c
deleted file mode 100644
index 475ea2de6..000000000
--- a/src/runtime/string.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "arch_GOARCH.h"
-#include "malloc.h"
-#include "race.h"
-#include "textflag.h"
-
-String runtime·emptystring;
-
-#pragma textflag NOSPLIT
-intgo
-runtime·findnull(byte *s)
-{
- intgo l;
-
- if(s == nil)
- return 0;
- for(l=0; s[l]!=0; l++)
- ;
- return l;
-}
-
-intgo
-runtime·findnullw(uint16 *s)
-{
- intgo l;
-
- if(s == nil)
- return 0;
- for(l=0; s[l]!=0; l++)
- ;
- return l;
-}
-
-uintptr runtime·maxstring = 256; // a hint for print
-
-#pragma textflag NOSPLIT
-String
-runtime·gostringnocopy(byte *str)
-{
- String s;
- uintptr ms;
-
- s.str = str;
- s.len = runtime·findnull(str);
- while(true) {
- ms = runtime·maxstring;
- if(s.len <= ms || runtime·casuintptr(&runtime·maxstring, ms, s.len))
- return s;
- }
-}
-
-// TODO: move this elsewhere
-enum
-{
- Bit1 = 7,
- Bitx = 6,
- Bit2 = 5,
- Bit3 = 4,
- Bit4 = 3,
- Bit5 = 2,
-
- Tx = ((1<<(Bitx+1))-1) ^ 0xFF, /* 1000 0000 */
- T2 = ((1<<(Bit2+1))-1) ^ 0xFF, /* 1100 0000 */
- T3 = ((1<<(Bit3+1))-1) ^ 0xFF, /* 1110 0000 */
- T4 = ((1<<(Bit4+1))-1) ^ 0xFF, /* 1111 0000 */
-
- Rune1 = (1<<(Bit1+0*Bitx))-1, /* 0000 0000 0111 1111 */
- Rune2 = (1<<(Bit2+1*Bitx))-1, /* 0000 0111 1111 1111 */
- Rune3 = (1<<(Bit3+2*Bitx))-1, /* 1111 1111 1111 1111 */
-
- Maskx = (1<<Bitx)-1, /* 0011 1111 */
-
- Runeerror = 0xFFFD,
-
- SurrogateMin = 0xD800,
- SurrogateMax = 0xDFFF,
-
- Runemax = 0x10FFFF, /* maximum rune value */
-};
-
-static int32
-runetochar(byte *str, int32 rune) /* note: in original, arg2 was pointer */
-{
- /* Runes are signed, so convert to unsigned for range check. */
- uint32 c;
-
- /*
- * one character sequence
- * 00000-0007F => 00-7F
- */
- c = rune;
- if(c <= Rune1) {
- str[0] = c;
- return 1;
- }
-
- /*
- * two character sequence
- * 0080-07FF => T2 Tx
- */
- if(c <= Rune2) {
- str[0] = T2 | (c >> 1*Bitx);
- str[1] = Tx | (c & Maskx);
- return 2;
- }
-
- /*
- * If the Rune is out of range or a surrogate half, convert it to the error rune.
- * Do this test here because the error rune encodes to three bytes.
- * Doing it earlier would duplicate work, since an out of range
- * Rune wouldn't have fit in one or two bytes.
- */
- if (c > Runemax)
- c = Runeerror;
- if (SurrogateMin <= c && c <= SurrogateMax)
- c = Runeerror;
-
- /*
- * three character sequence
- * 0800-FFFF => T3 Tx Tx
- */
- if (c <= Rune3) {
- str[0] = T3 | (c >> 2*Bitx);
- str[1] = Tx | ((c >> 1*Bitx) & Maskx);
- str[2] = Tx | (c & Maskx);
- return 3;
- }
-
- /*
- * four character sequence (21-bit value)
- * 10000-1FFFFF => T4 Tx Tx Tx
- */
- str[0] = T4 | (c >> 3*Bitx);
- str[1] = Tx | ((c >> 2*Bitx) & Maskx);
- str[2] = Tx | ((c >> 1*Bitx) & Maskx);
- str[3] = Tx | (c & Maskx);
- return 4;
-}
-
-String runtime·gostringsize(intgo);
-
-String
-runtime·gostringw(uint16 *str)
-{
- intgo n1, n2, i;
- byte buf[8];
- String s;
-
- n1 = 0;
- for(i=0; str[i]; i++)
- n1 += runetochar(buf, str[i]);
- s = runtime·gostringsize(n1+4);
- n2 = 0;
- for(i=0; str[i]; i++) {
- // check for race
- if(n2 >= n1)
- break;
- n2 += runetochar(s.str+n2, str[i]);
- }
- s.len = n2;
- s.str[s.len] = 0;
- return s;
-}
-
-int32
-runtime·strcmp(byte *s1, byte *s2)
-{
- uintptr i;
- byte c1, c2;
-
- for(i=0;; i++) {
- c1 = s1[i];
- c2 = s2[i];
- if(c1 < c2)
- return -1;
- if(c1 > c2)
- return +1;
- if(c1 == 0)
- return 0;
- }
-}
-
-int32
-runtime·strncmp(byte *s1, byte *s2, uintptr n)
-{
- uintptr i;
- byte c1, c2;
-
- for(i=0; i<n; i++) {
- c1 = s1[i];
- c2 = s2[i];
- if(c1 < c2)
- return -1;
- if(c1 > c2)
- return +1;
- if(c1 == 0)
- break;
- }
- return 0;
-}
-
-byte*
-runtime·strstr(byte *s1, byte *s2)
-{
- byte *sp1, *sp2;
-
- if(*s2 == 0)
- return s1;
- for(; *s1; s1++) {
- if(*s1 != *s2)
- continue;
- sp1 = s1;
- sp2 = s2;
- for(;;) {
- if(*sp2 == 0)
- return s1;
- if(*sp1++ != *sp2++)
- break;
- }
- }
- return nil;
-}
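
The deleted runetochar above is a hand-written UTF-8 encoder: Bit1..Bit4 give the payload width of the leading byte for 1–4 byte sequences, Bitx the payload of a continuation byte, and out-of-range or surrogate runes collapse to Runeerror (U+FFFD). The standard library's encoder applies the same rules; the quick check below is illustrative only and is not part of this change.

    package main

    import (
    	"fmt"
    	"unicode/utf8"
    )

    func main() {
    	var buf [4]byte
    	for _, r := range []rune{'A', 'é', '世', '𝔊', 0xD800, 0x110000} {
    		// EncodeRune clamps surrogates and out-of-range values to U+FFFD,
    		// matching the range checks in the deleted runetochar.
    		n := utf8.EncodeRune(buf[:], r)
    		fmt.Printf("U+%06X -> % x\n", r, buf[:n])
    	}
    }
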
diff --git a/src/runtime/string.go b/src/runtime/string.go
index 882281605..e01bc3b84 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -221,7 +221,7 @@ func rawbyteslice(size int) (b []byte) {
// rawruneslice allocates a new rune slice. The rune slice is not zeroed.
func rawruneslice(size int) (b []rune) {
- if uintptr(size) > maxmem/4 {
+ if uintptr(size) > _MaxMem/4 {
gothrow("out of memory")
}
mem := goroundupsize(uintptr(size) * 4)
@@ -251,9 +251,6 @@ func gostringsize(n int) string {
return s
}
-//go:noescape
-func findnull(*byte) int
-
func gostring(p *byte) string {
l := findnull(p)
if l == 0 {
@@ -292,3 +289,12 @@ func contains(s, t string) bool {
func hasprefix(s, t string) bool {
return len(s) >= len(t) && s[:len(t)] == t
}
+
+func goatoi(s string) int {
+ n := 0
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ n = n*10 + int(s[0]) - '0'
+ s = s[1:]
+ }
+ return n
+}
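
goatoi above is intentionally minimal: it consumes leading decimal digits and stops at the first non-digit, with no sign or overflow handling. A hedged illustration of that behavior (a copy of the logic for demonstration, not runtime code):

    package main

    import "fmt"

    // atoi mirrors the runtime's goatoi: parse leading decimal digits only.
    func atoi(s string) int {
    	n := 0
    	for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
    		n = n*10 + int(s[0]) - '0'
    		s = s[1:]
    	}
    	return n
    }

    func main() {
    	fmt.Println(atoi("42"))    // 42
    	fmt.Println(atoi("8cpus")) // 8: stops at 'c'
    	fmt.Println(atoi("-3"))    // 0: no sign handling
    }
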
diff --git a/src/runtime/string1.go b/src/runtime/string1.go
new file mode 100644
index 000000000..35cde43be
--- /dev/null
+++ b/src/runtime/string1.go
@@ -0,0 +1,108 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//go:nosplit
+func findnull(s *byte) int {
+ if s == nil {
+ return 0
+ }
+ p := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s))
+ l := 0
+ for p[l] != 0 {
+ l++
+ }
+ return l
+}
+
+func findnullw(s *uint16) int {
+ if s == nil {
+ return 0
+ }
+ p := (*[_MaxMem/2/2 - 1]uint16)(unsafe.Pointer(s))
+ l := 0
+ for p[l] != 0 {
+ l++
+ }
+ return l
+}
+
+var maxstring uintptr = 256 // a hint for print
+
+//go:nosplit
+func gostringnocopy(str *byte) string {
+ var s string
+ sp := (*stringStruct)(unsafe.Pointer(&s))
+ sp.str = unsafe.Pointer(str)
+ sp.len = findnull(str)
+ for {
+ ms := maxstring
+ if uintptr(len(s)) <= ms || casuintptr(&maxstring, ms, uintptr(len(s))) {
+ break
+ }
+ }
+ return s
+}
+
+func gostringw(strw *uint16) string {
+ var buf [8]byte
+ str := (*[_MaxMem/2/2 - 1]uint16)(unsafe.Pointer(strw))
+ n1 := 0
+ for i := 0; str[i] != 0; i++ {
+ n1 += runetochar(buf[:], rune(str[i]))
+ }
+ s, b := rawstring(n1 + 4)
+ n2 := 0
+ for i := 0; str[i] != 0; i++ {
+ // check for race
+ if n2 >= n1 {
+ break
+ }
+ n2 += runetochar(b[n2:], rune(str[i]))
+ }
+ b[n2] = 0 // for luck
+ return s[:n2]
+}
+
+func strcmp(s1, s2 *byte) int32 {
+ p1 := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s1))
+ p2 := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s2))
+
+ for i := uintptr(0); ; i++ {
+ c1 := p1[i]
+ c2 := p2[i]
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ if c1 == 0 {
+ return 0
+ }
+ }
+}
+
+func strncmp(s1, s2 *byte, n uintptr) int32 {
+ p1 := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s1))
+ p2 := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s2))
+
+ for i := uintptr(0); i < n; i++ {
+ c1 := p1[i]
+ c2 := p2[i]
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ if c1 == 0 {
+ break
+ }
+ }
+ return 0
+}
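
The loop in gostringnocopy above is a lock-free "raise to at least" update of the maxstring hint: reload the current maximum and only CAS when our value is larger, retrying if another goroutine won the race. The same pattern sketched with sync/atomic outside the runtime (the runtime itself uses its own casuintptr; names here are illustrative):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    var maxSeen uint64 // analogous to the runtime's maxstring hint

    // raise bumps maxSeen to at least n, tolerating concurrent callers.
    func raise(n uint64) {
    	for {
    		cur := atomic.LoadUint64(&maxSeen)
    		if n <= cur || atomic.CompareAndSwapUint64(&maxSeen, cur, n) {
    			return
    		}
    		// CAS lost a race; reload and retry.
    	}
    }

    func main() {
    	raise(256)
    	raise(100) // no effect: smaller than the current max
    	raise(513)
    	fmt.Println(atomic.LoadUint64(&maxSeen)) // 513
    }
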
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 9889567d6..217307a1e 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -23,12 +23,7 @@ func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(p) + delta)
}
-// in runtime.c
func getg() *g
-func acquirem() *m
-func releasem(mp *m)
-func gomcache() *mcache
-func readgstatus(*g) uint32 // proc.c
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
@@ -43,95 +38,30 @@ func readgstatus(*g) uint32 // proc.c
//go:noescape
func mcall(fn func(*g))
-// onM switches from the g to the g0 stack and invokes fn().
-// When fn returns, onM switches back to the g and returns,
-// continuing execution on the g stack.
-// If arguments must be passed to fn, they can be written to
-// g->m->ptrarg (pointers) and g->m->scalararg (non-pointers)
-// before the call and then consulted during fn.
-// Similarly, fn can pass return values back in those locations.
-// If fn is written in Go, it can be a closure, which avoids the need for
-// ptrarg and scalararg entirely.
-// After reading values out of ptrarg and scalararg it is conventional
-// to zero them to avoid (memory or information) leaks.
+// systemstack runs fn on a system stack.
+// If systemstack is called from the per-OS-thread (g0) stack, or
+// if systemstack is called from the signal handling (gsignal) stack,
+// systemstack calls fn directly and returns.
+// Otherwise, systemstack is being called from the limited stack
+// of an ordinary goroutine. In this case, systemstack switches
+// to the per-OS-thread stack, calls fn, and switches back.
+// It is common to use a func literal as the argument, in order
+// to share inputs and outputs with the code around the call
+// to system stack:
//
-// If onM is called from a g0 stack, it invokes fn and returns,
-// without any stack switches.
-//
-// If onM is called from a gsignal stack, it crashes the program.
-// The implication is that functions used in signal handlers must
-// not use onM.
-//
-// NOTE(rsc): We could introduce a separate onMsignal that is
-// like onM but if called from a gsignal stack would just run fn on
-// that stack. The caller of onMsignal would be required to save the
-// old values of ptrarg/scalararg and restore them when the call
-// was finished, in case the signal interrupted an onM sequence
-// in progress on the g or g0 stacks. Until there is a clear need for this,
-// we just reject onM in signal handling contexts entirely.
-//
-//go:noescape
-func onM(fn func())
-
-// onMsignal is like onM but is allowed to be used in code that
-// might run on the gsignal stack. Code running on a signal stack
-// may be interrupting an onM sequence on the main stack, so
-// if the onMsignal calling sequence writes to ptrarg/scalararg,
-// it must first save the old values and then restore them when
-// finished. As an exception to the rule, it is fine not to save and
-// restore the values if the program is trying to crash rather than
-// return from the signal handler.
-// Once all the runtime is written in Go, there will be no ptrarg/scalararg
-// and the distinction between onM and onMsignal (and perhaps mcall)
-// can go away.
-//
-// If onMsignal is called from a gsignal stack, it invokes fn directly,
-// without a stack switch. Otherwise onMsignal behaves like onM.
+// ... set up y ...
+// systemstack(func() {
+// x = bigcall(y)
+// })
+// ... use x ...
//
//go:noescape
-func onM_signalok(fn func())
+func systemstack(fn func())
-func badonm() {
- gothrow("onM called from signal goroutine")
+func badsystemstack() {
+ gothrow("systemstack called from unexpected goroutine")
}
-// C functions that run on the M stack.
-// Call using mcall.
-func gosched_m(*g)
-func park_m(*g)
-func recovery_m(*g)
-
-// More C functions that run on the M stack.
-// Call using onM.
-func mcacheRefill_m()
-func largeAlloc_m()
-func gc_m()
-func gcscan_m()
-func gcmark_m()
-func gccheckmark_m()
-func gccheckmarkenable_m()
-func gccheckmarkdisable_m()
-func gcinstallmarkwb_m()
-func gcinstalloffwb_m()
-func gcmarknewobject_m()
-func gcmarkwb_m()
-func finishsweep_m()
-func scavenge_m()
-func setFinalizer_m()
-func removeFinalizer_m()
-func markallocated_m()
-func unrollgcprog_m()
-func unrollgcproginplace_m()
-func setgcpercent_m()
-func setmaxthreads_m()
-func ready_m()
-func deferproc_m()
-func goexit_m()
-func startpanic_m()
-func dopanic_m()
-func readmemstats_m()
-func writeheapdump_m()
-
// memclr clears n bytes starting at ptr.
// in memclr_*.s
//go:noescape
@@ -142,12 +72,6 @@ func memclr(ptr unsafe.Pointer, n uintptr)
//go:noescape
func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
-func starttheworld()
-func stoptheworld()
-func newextram()
-func lockOSThread()
-func unlockOSThread()
-
// exported value for testing
var hashLoad = loadFactor
@@ -169,16 +93,9 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
return unsafe.Pointer(x ^ 0)
}
-func entersyscall()
-func reentersyscall(pc uintptr, sp unsafe.Pointer)
-func entersyscallblock()
-func exitsyscall()
-
func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
func gogo(buf *gobuf)
func gosave(buf *gobuf)
-func read(fd int32, p unsafe.Pointer, n int32) int32
-func close(fd int32) int32
func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
//go:noescape
@@ -186,33 +103,32 @@ func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func asminit()
func setg(gg *g)
-func exit(code int32)
func breakpoint()
-func nanotime() int64
-func usleep(usec uint32)
-
-// careful: cputicks is not guaranteed to be monotonic! In particular, we have
-// noticed drift between cpus on certain os/arch combinations. See issue 8976.
-func cputicks() int64
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
-func munmap(addr unsafe.Pointer, n uintptr)
-func madvise(addr unsafe.Pointer, n uintptr, flags int32)
func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
-func osyield()
func procyield(cycles uint32)
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
-func readgogc() int32
-func purgecachedstats(c *mcache)
-func gostringnocopy(b *byte) string
func goexit()
//go:noescape
-func write(fd uintptr, p unsafe.Pointer, n int32) int32
-
-//go:noescape
func cas(ptr *uint32, old, new uint32) bool
+// casp cannot have a go:noescape annotation, because
+// while ptr and old do not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make casp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+ return casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
+}
+
+func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+func nop() // call to prevent inlining of function body
+
//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool
@@ -268,18 +184,10 @@ func asmcgocall(fn, arg unsafe.Pointer)
//go:noescape
func asmcgocall_errno(fn, arg unsafe.Pointer) int32
-//go:noescape
-func open(name *byte, mode, perm int32) int32
-
-//go:noescape
-func gotraceback(*bool) int32
-
+// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)
-func newstack()
-func newproc()
func morestack()
-func mstart()
func rt0_go()
// return0 is a stub used to return 0 from deferproc.
@@ -321,3 +229,5 @@ func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32)
func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
+
+func systemstack_switch()
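
The casp/casp1 split above is a general idiom: keep the argument that really escapes (new) visible to escape analysis, and launder the ones that do not (ptr, old) through noescape before calling the underlying primitive. A minimal sketch of the same laundering, assuming a stand-in store function rather than the real atomic; the noescape body is the xor-with-zero trick already present in stubs.go.

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // noescape hides p from escape analysis; the xor with zero is a no-op
    // the compiler cannot see through (same trick as in stubs.go).
    func noescape(p unsafe.Pointer) unsafe.Pointer {
    	x := uintptr(p)
    	return unsafe.Pointer(x ^ 0)
    }

    // store is a stand-in for an atomic primitive whose new argument must be
    // allowed to escape, while ptr should not force the caller's variable to the heap.
    func store(ptr *unsafe.Pointer, new unsafe.Pointer) {
    	*ptr = new
    }

    // storeWrapper mirrors casp: only new is passed through un-laundered.
    func storeWrapper(ptr *unsafe.Pointer, new unsafe.Pointer) {
    	store((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), new)
    }

    func main() {
    	var slot unsafe.Pointer
    	v := 42
    	storeWrapper(&slot, unsafe.Pointer(&v))
    	fmt.Println(*(*int)(slot)) // 42
    }
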
diff --git a/src/runtime/stubs2.go b/src/runtime/stubs2.go
new file mode 100644
index 000000000..526b3c569
--- /dev/null
+++ b/src/runtime/stubs2.go
@@ -0,0 +1,27 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !solaris
+
+package runtime
+
+import "unsafe"
+
+func read(fd int32, p unsafe.Pointer, n int32) int32
+func close(fd int32) int32
+
+func exit(code int32)
+func nanotime() int64
+func usleep(usec uint32)
+
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
+func munmap(addr unsafe.Pointer, n uintptr)
+
+//go:noescape
+func write(fd uintptr, p unsafe.Pointer, n int32) int32
+
+//go:noescape
+func open(name *byte, mode, perm int32) int32
+
+func madvise(addr unsafe.Pointer, n uintptr, flags int32)
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 45d107b77..749a289cd 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -22,8 +22,7 @@ func (f *Func) raw() *_func {
// funcdata.h
const (
- _PCDATA_ArgSize = 0
- _PCDATA_StackMapIndex = 1
+ _PCDATA_StackMapIndex = 0
_FUNCDATA_ArgsPointerMaps = 0
_FUNCDATA_LocalsPointerMaps = 1
_FUNCDATA_DeadValueMaps = 2
@@ -122,8 +121,8 @@ func (f *Func) Entry() uintptr {
func (f *Func) FileLine(pc uintptr) (file string, line int) {
// Pass strict=false here, because anyone can call this function,
// and they might just be wrong about targetpc belonging to f.
- line = int(funcline1(f.raw(), pc, &file, false))
- return file, line
+ file, line32 := funcline1(f.raw(), pc, false)
+ return file, int(line32)
}
func findfunc(pc uintptr) *_func {
@@ -208,20 +207,19 @@ func gofuncname(f *_func) string {
return gostringnocopy(funcname(f))
}
-func funcline1(f *_func, targetpc uintptr, file *string, strict bool) int32 {
- *file = "?"
+func funcline1(f *_func, targetpc uintptr, strict bool) (file string, line int32) {
fileno := int(pcvalue(f, f.pcfile, targetpc, strict))
- line := pcvalue(f, f.pcln, targetpc, strict)
+ line = pcvalue(f, f.pcln, targetpc, strict)
if fileno == -1 || line == -1 || fileno >= len(filetab) {
// print("looking for ", hex(targetpc), " in ", gofuncname(f), " got file=", fileno, " line=", lineno, "\n")
- return 0
+ return "?", 0
}
- *file = gostringnocopy(&pclntable[filetab[fileno]])
- return line
+ file = gostringnocopy(&pclntable[filetab[fileno]])
+ return
}
-func funcline(f *_func, targetpc uintptr, file *string) int32 {
- return funcline1(f, targetpc, file, true)
+func funcline(f *_func, targetpc uintptr) (file string, line int32) {
+ return funcline1(f, targetpc, true)
}
func funcspdelta(f *_func, targetpc uintptr) int32 {
diff --git a/src/runtime/sys_arm.c b/src/runtime/sys_arm.c
deleted file mode 100644
index a65560e5b..000000000
--- a/src/runtime/sys_arm.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-
-// adjust Gobuf as if it executed a call to fn with context ctxt
-// and then did an immediate Gosave.
-void
-runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
-{
- if(gobuf->lr != 0)
- runtime·throw("invalid use of gostartcall");
- gobuf->lr = gobuf->pc;
- gobuf->pc = (uintptr)fn;
- gobuf->ctxt = ctxt;
-}
-
-// Called to rewind context saved during morestack back to beginning of function.
-// To help us, the linker emits a jmp back to the beginning right after the
-// call to morestack. We just have to decode and apply that jump.
-void
-runtime·rewindmorestack(Gobuf *gobuf)
-{
- uint32 inst;
-
- inst = *(uint32*)gobuf->pc;
- if((gobuf->pc&3) == 0 && (inst>>24) == 0x9a) {
- //runtime·printf("runtime: rewind pc=%p to pc=%p\n", gobuf->pc, gobuf->pc + ((int32)(inst<<8)>>6) + 8);
- gobuf->pc += ((int32)(inst<<8)>>6) + 8;
- return;
- }
- runtime·printf("runtime: pc=%p %x\n", gobuf->pc, inst);
- runtime·throw("runtime: misuse of rewindmorestack");
-}
diff --git a/src/runtime/sys_arm.go b/src/runtime/sys_arm.go
new file mode 100644
index 000000000..81777c710
--- /dev/null
+++ b/src/runtime/sys_arm.go
@@ -0,0 +1,35 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ if buf.lr != 0 {
+ gothrow("invalid use of gostartcall")
+ }
+ buf.lr = buf.pc
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
+}
+
+// Called to rewind context saved during morestack back to beginning of function.
+// To help us, the linker emits a jmp back to the beginning right after the
+// call to morestack. We just have to decode and apply that jump.
+func rewindmorestack(buf *gobuf) {
+ var inst uint32
+ if buf.pc&3 == 0 && buf.pc != 0 {
+ inst = *(*uint32)(unsafe.Pointer(buf.pc))
+ if inst>>24 == 0x9a {
+ buf.pc += uintptr(int32(inst<<8)>>6) + 8
+ return
+ }
+ }
+
+ print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
+ gothrow("runtime: misuse of rewindmorestack")
+}
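
rewindmorestack above decodes the linker-emitted ARM branch by hand: int32(inst<<8)>>6 sign-extends the 24-bit immediate and multiplies it by 4 in one step, and the extra 8 accounts for ARM's PC-reads-ahead convention. A small check of that arithmetic; the sample instruction word below is made up for illustration.

    package main

    import "fmt"

    // branchOffset reproduces the offset computation in rewindmorestack:
    // shift the 24-bit immediate to the top, arithmetic-shift back by 6
    // (sign-extend and multiply by 4), then add 8 for the ARM PC offset.
    func branchOffset(inst uint32) int64 {
    	return int64(int32(inst<<8)>>6) + 8
    }

    func main() {
    	// Hypothetical encoding with 24-bit immediate 0xfffffb (-5):
    	// target = pc + (-5)*4 + 8 = pc - 12.
    	inst := uint32(0x9afffffb)
    	fmt.Println(branchOffset(inst)) // -12
    }
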
diff --git a/src/runtime/sys_darwin_386.s b/src/runtime/sys_darwin_386.s
index 3bf8b1d41..7cb5695e7 100644
--- a/src/runtime/sys_darwin_386.s
+++ b/src/runtime/sys_darwin_386.s
@@ -6,7 +6,8 @@
// See http://fxr.watson.org/fxr/source/bsd/kern/syscalls.c?v=xnu-1228
// or /usr/include/sys/syscall.h (on a Mac) for system call numbers.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// Exit the entire program (like C exit)
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 8a8928e06..0a955f982 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -11,7 +11,8 @@
// The high 8 bits specify the kind of system call: 1=Mach, 2=BSD, 3=Machine-Dependent.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// Exit the entire program (like C exit)
diff --git a/src/runtime/sys_dragonfly_386.s b/src/runtime/sys_dragonfly_386.s
index 71ece9ecb..bb4903ef6 100644
--- a/src/runtime/sys_dragonfly_386.s
+++ b/src/runtime/sys_dragonfly_386.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT runtime·sys_umtx_sleep(SB),NOSPLIT,$-4
diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s
index 2c756018c..db07ed703 100644
--- a/src/runtime/sys_dragonfly_amd64.s
+++ b/src/runtime/sys_dragonfly_amd64.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT runtime·sys_umtx_sleep(SB),NOSPLIT,$0
diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s
index 66d03c27d..d1f67c3fc 100644
--- a/src/runtime/sys_freebsd_386.s
+++ b/src/runtime/sys_freebsd_386.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT runtime·sys_umtx_op(SB),NOSPLIT,$-4
diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s
index 65f8c1a6e..84cee51d8 100644
--- a/src/runtime/sys_freebsd_amd64.s
+++ b/src/runtime/sys_freebsd_amd64.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// FreeBSD 8, FreeBSD 9, and older versions that I have checked
diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s
index d875138b6..198b427bf 100644
--- a/src/runtime/sys_freebsd_arm.s
+++ b/src/runtime/sys_freebsd_arm.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// for EABI, as we don't support OABI
@@ -362,7 +363,7 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $0
RET
-TEXT runtime·casp(SB),NOSPLIT,$0
+TEXT runtime·casp1(SB),NOSPLIT,$0
B runtime·cas(SB)
// TODO(minux): this is only valid for ARMv6+
diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s
index 0f6d4bbb5..1861f237f 100644
--- a/src/runtime/sys_linux_386.s
+++ b/src/runtime/sys_linux_386.s
@@ -6,7 +6,8 @@
// System calls and other sys.stuff for 386, Linux
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT runtime·exit(SB),NOSPLIT,$0
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index d8d86ffad..6d4dfdbd2 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -6,7 +6,8 @@
// System calls and other sys.stuff for AMD64, Linux
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT runtime·exit(SB),NOSPLIT,$0-4
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index 033a03642..21d97fda9 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -6,7 +6,8 @@
// System calls and other sys.stuff for arm, Linux
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// for EABI, as we don't support OABI
@@ -391,7 +392,7 @@ check:
MOVB R0, ret+12(FP)
RET
-TEXT runtime·casp(SB),NOSPLIT,$0
+TEXT runtime·casp1(SB),NOSPLIT,$0
B runtime·cas(SB)
TEXT runtime·osyield(SB),NOSPLIT,$0
diff --git a/src/runtime/sys_nacl_386.s b/src/runtime/sys_nacl_386.s
index 16cd721d9..85c8175b1 100644
--- a/src/runtime/sys_nacl_386.s
+++ b/src/runtime/sys_nacl_386.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
#include "syscall_nacl.h"
diff --git a/src/runtime/sys_nacl_amd64p32.s b/src/runtime/sys_nacl_amd64p32.s
index 9cfbef6ef..f5624ca8d 100644
--- a/src/runtime/sys_nacl_amd64p32.s
+++ b/src/runtime/sys_nacl_amd64p32.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
#include "syscall_nacl.h"
diff --git a/src/runtime/sys_nacl_arm.s b/src/runtime/sys_nacl_arm.s
index 432deadf4..ded95a86b 100644
--- a/src/runtime/sys_nacl_arm.s
+++ b/src/runtime/sys_nacl_arm.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
#include "syscall_nacl.h"
@@ -300,7 +301,7 @@ nog:
TEXT runtime·nacl_sysinfo(SB),NOSPLIT,$16
RET
-TEXT runtime·casp(SB),NOSPLIT,$0
+TEXT runtime·casp1(SB),NOSPLIT,$0
B runtime·cas(SB)
// This is only valid for ARMv6+, however, NaCl/ARM is only defined
diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s
index 23f2f6bd1..509d6d4a8 100644
--- a/src/runtime/sys_netbsd_386.s
+++ b/src/runtime/sys_netbsd_386.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// Exit the entire program (like C exit)
diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s
index eb9766d3f..e26d60667 100644
--- a/src/runtime/sys_netbsd_amd64.s
+++ b/src/runtime/sys_netbsd_amd64.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// int32 lwp_create(void *context, uintptr flags, void *lwpid)
diff --git a/src/runtime/sys_netbsd_arm.s b/src/runtime/sys_netbsd_arm.s
index 039a0832e..fa9bc577a 100644
--- a/src/runtime/sys_netbsd_arm.s
+++ b/src/runtime/sys_netbsd_arm.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// Exit the entire program (like C exit)
@@ -330,7 +331,7 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $0xa0005c // sys_fcntl
RET
-TEXT runtime·casp(SB),NOSPLIT,$0
+TEXT runtime·casp1(SB),NOSPLIT,$0
B runtime·cas(SB)
// TODO(minux): this is only valid for ARMv6+
diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s
index b1ae5ecee..93907577e 100644
--- a/src/runtime/sys_openbsd_386.s
+++ b/src/runtime/sys_openbsd_386.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
#define CLOCK_MONOTONIC $3
diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s
index 4e9db2390..9dc0fb685 100644
--- a/src/runtime/sys_openbsd_amd64.s
+++ b/src/runtime/sys_openbsd_amd64.s
@@ -6,7 +6,8 @@
// /usr/src/sys/kern/syscalls.master for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
#define CLOCK_MONOTONIC $3
diff --git a/src/runtime/sys_plan9_386.s b/src/runtime/sys_plan9_386.s
index a41b56258..b9db8cbf1 100644
--- a/src/runtime/sys_plan9_386.s
+++ b/src/runtime/sys_plan9_386.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// setldt(int entry, int address, int limit)
diff --git a/src/runtime/sys_plan9_amd64.s b/src/runtime/sys_plan9_amd64.s
index 3a96c2bf9..02c7c8743 100644
--- a/src/runtime/sys_plan9_amd64.s
+++ b/src/runtime/sys_plan9_amd64.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// setldt(int entry, int address, int limit)
diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s
index 3981893b0..54aeaeaf5 100644
--- a/src/runtime/sys_solaris_amd64.s
+++ b/src/runtime/sys_solaris_amd64.s
@@ -6,14 +6,15 @@
// /usr/include/sys/syscall.h for syscall numbers.
//
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// This is needed by asm_amd64.s
TEXT runtime·settls(SB),NOSPLIT,$8
RET
-// void libc·miniterrno(void *(*___errno)(void));
+// void libc_miniterrno(void *(*___errno)(void));
//
// Set the TLS errno pointer in M.
//
@@ -40,7 +41,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$0
SUBQ $64, SP // 16 bytes will do, but who knows in the future?
MOVQ $3, DI // CLOCK_REALTIME from <sys/time_impl.h>
MOVQ SP, SI
- MOVQ libc·clock_gettime(SB), AX
+ MOVQ libc_clock_gettime(SB), AX
CALL AX
MOVQ (SP), AX // tv_sec from struct timespec
IMULQ $1000000000, AX // multiply into nanoseconds
@@ -53,7 +54,7 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$0
TEXT runtime·pipe1(SB),NOSPLIT,$0
 	SUBQ	$16, SP		// 8 bytes will do, but stack has to be 16-byte aligned
MOVQ SP, DI
- MOVQ libc·pipe(SB), AX
+ MOVQ libc_pipe(SB), AX
CALL AX
MOVL 0(SP), AX
MOVL 4(SP), DX
@@ -132,7 +133,7 @@ TEXT runtime·tstart_sysvicall(SB),NOSPLIT,$0
MOVQ AX, (g_stack+stack_hi)(DX)
SUBQ $(0x100000), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX)
- ADDQ $const_StackGuard, AX
+ ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(DX)
MOVQ AX, g_stackguard1(DX)
@@ -320,13 +321,13 @@ noswitch:
// Runs on OS stack. duration (in µs units) is in DI.
TEXT runtime·usleep2(SB),NOSPLIT,$0
- MOVQ libc·usleep(SB), AX
+ MOVQ libc_usleep(SB), AX
CALL AX
RET
// Runs on OS stack, called from runtime·osyield.
TEXT runtime·osyield1(SB),NOSPLIT,$0
- MOVQ libc·sched_yield(SB), AX
+ MOVQ libc_sched_yield(SB), AX
CALL AX
RET
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index 13fb5bdc9..2793e5221 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// void runtime·asmstdcall(void *c);
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index 8b95f6d6c..5e5c2e7f5 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
// maxargs should be divisible by 2, as Windows stack
diff --git a/src/runtime/sys_x86.c b/src/runtime/sys_x86.go
index edbe47ff4..086af8ff1 100644
--- a/src/runtime/sys_x86.c
+++ b/src/runtime/sys_x86.go
@@ -4,55 +4,51 @@
// +build amd64 amd64p32 386
-#include "runtime.h"
+package runtime
+
+import "unsafe"
// adjust Gobuf as if it executed a call to fn with context ctxt
// and then did an immediate gosave.
-void
-runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
-{
- uintptr *sp;
-
- sp = (uintptr*)gobuf->sp;
- if(sizeof(uintreg) > sizeof(uintptr))
- *--sp = 0;
- *--sp = (uintptr)gobuf->pc;
- gobuf->sp = (uintptr)sp;
- gobuf->pc = (uintptr)fn;
- gobuf->ctxt = ctxt;
- runtime·writebarrierptr_nostore(&gobuf->ctxt, ctxt);
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ sp := buf.sp
+ if regSize > ptrSize {
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(sp)) = 0
+ }
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(sp)) = buf.pc
+ buf.sp = sp
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
}
// Called to rewind context saved during morestack back to beginning of function.
// To help us, the linker emits a jmp back to the beginning right after the
// call to morestack. We just have to decode and apply that jump.
-void
-runtime·rewindmorestack(Gobuf *gobuf)
-{
- byte *pc;
-
- pc = (byte*)gobuf->pc;
- if(pc[0] == 0xe9) { // jmp 4-byte offset
- gobuf->pc = gobuf->pc + 5 + *(int32*)(pc+1);
- return;
+func rewindmorestack(buf *gobuf) {
+ pc := (*[8]byte)(unsafe.Pointer(buf.pc))
+ if pc[0] == 0xe9 { // jmp 4-byte offset
+ buf.pc = buf.pc + 5 + uintptr(int64(*(*int32)(unsafe.Pointer(&pc[1]))))
+ return
}
- if(pc[0] == 0xeb) { // jmp 1-byte offset
- gobuf->pc = gobuf->pc + 2 + *(int8*)(pc+1);
- return;
+ if pc[0] == 0xeb { // jmp 1-byte offset
+ buf.pc = buf.pc + 2 + uintptr(int64(*(*int8)(unsafe.Pointer(&pc[1]))))
+ return
}
- if(pc[0] == 0xcc) {
+ if pc[0] == 0xcc {
// This is a breakpoint inserted by gdb. We could use
// runtime·findfunc to find the function. But if we
// do that, then we will continue execution at the
// function entry point, and we will not hit the gdb
// breakpoint. So for this case we don't change
- // gobuf->pc, so that when we return we will execute
+ // buf.pc, so that when we return we will execute
// the jump instruction and carry on. This means that
// stack unwinding may not work entirely correctly
// (http://golang.org/issue/5723) but the user is
// running under gdb anyhow.
- return;
+ return
}
- runtime·printf("runtime: pc=%p %x %x %x %x %x\n", pc, pc[0], pc[1], pc[2], pc[3], pc[4]);
- runtime·throw("runtime: misuse of rewindmorestack");
+ print("runtime: pc=", pc, " ", hex(pc[0]), " ", hex(pc[1]), " ", hex(pc[2]), " ", hex(pc[3]), " ", hex(pc[4]), "\n")
+ gothrow("runtime: misuse of rewindmorestack")
}
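
The x86 variant recognizes the two jmp encodings the linker may emit: 0xe9 with a 4-byte relative offset (target = pc + 5 + rel32) and 0xeb with a 1-byte offset (target = pc + 2 + rel8). A hedged decode of both forms on made-up byte sequences, outside the runtime:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // jmpTarget decodes a jmp at pc, mirroring the two cases in rewindmorestack.
    // It reports the target and whether the bytes looked like a jmp at all.
    func jmpTarget(pc uint64, code []byte) (uint64, bool) {
    	switch code[0] {
    	case 0xe9: // jmp rel32: 5-byte instruction
    		rel := int32(binary.LittleEndian.Uint32(code[1:5]))
    		return pc + 5 + uint64(int64(rel)), true
    	case 0xeb: // jmp rel8: 2-byte instruction
    		return pc + 2 + uint64(int64(int8(code[1]))), true
    	}
    	return 0, false
    }

    func main() {
    	// jmp rel8 back by 0x20 bytes, counted from the end of the instruction.
    	t, _ := jmpTarget(0x1000, []byte{0xeb, 0xe0})
    	fmt.Printf("%#x\n", t) // 0xfe2
    	// jmp rel32 forward by 0x100 bytes.
    	t, _ = jmpTarget(0x1000, []byte{0xe9, 0x00, 0x01, 0x00, 0x00})
    	fmt.Printf("%#x\n", t) // 0x1105
    }
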
diff --git a/src/runtime/syscall2_solaris.go b/src/runtime/syscall2_solaris.go
new file mode 100644
index 000000000..f4ffa7410
--- /dev/null
+++ b/src/runtime/syscall2_solaris.go
@@ -0,0 +1,47 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import _ "unsafe"
+
+//go:cgo_import_dynamic libc_chdir chdir "libc.so"
+//go:cgo_import_dynamic libc_chroot chroot "libc.so"
+//go:cgo_import_dynamic libc_close close "libc.so"
+//go:cgo_import_dynamic libc_dlclose dlclose "libc.so"
+//go:cgo_import_dynamic libc_dlopen dlopen "libc.so"
+//go:cgo_import_dynamic libc_dlsym dlsym "libc.so"
+//go:cgo_import_dynamic libc_execve execve "libc.so"
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+//go:cgo_import_dynamic libc_gethostname gethostname "libc.so"
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+//go:cgo_import_dynamic libc_pipe pipe "libc.so"
+//go:cgo_import_dynamic libc_setgid setgid "libc.so"
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+//go:cgo_import_dynamic libc_setsid setsid "libc.so"
+//go:cgo_import_dynamic libc_setuid setuid "libc.so"
+//go:cgo_import_dynamic libc_setpgid setsid "libc.so"
+//go:cgo_import_dynamic libc_syscall syscall "libc.so"
+//go:cgo_import_dynamic libc_forkx forkx "libc.so"
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
+
+//go:linkname libc_chdir libc_chdir
+//go:linkname libc_chroot libc_chroot
+//go:linkname libc_close libc_close
+//go:linkname libc_dlclose libc_dlclose
+//go:linkname libc_dlopen libc_dlopen
+//go:linkname libc_dlsym libc_dlsym
+//go:linkname libc_execve libc_execve
+//go:linkname libc_fcntl libc_fcntl
+//go:linkname libc_gethostname libc_gethostname
+//go:linkname libc_ioctl libc_ioctl
+//go:linkname libc_pipe libc_pipe
+//go:linkname libc_setgid libc_setgid
+//go:linkname libc_setgroups libc_setgroups
+//go:linkname libc_setsid libc_setsid
+//go:linkname libc_setuid libc_setuid
+//go:linkname libc_setpgid libc_setpgid
+//go:linkname libc_syscall libc_syscall
+//go:linkname libc_forkx libc_forkx
+//go:linkname libc_wait4 libc_wait4
diff --git a/src/runtime/syscall_solaris.c b/src/runtime/syscall_solaris.c
deleted file mode 100644
index 13ac31bde..000000000
--- a/src/runtime/syscall_solaris.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#pragma dynimport libc·chdir chdir "libc.so"
-#pragma dynimport libc·chroot chroot "libc.so"
-#pragma dynimport libc·close close "libc.so"
-#pragma dynimport libc·dlclose dlclose "libc.so"
-#pragma dynimport libc·dlopen dlopen "libc.so"
-#pragma dynimport libc·dlsym dlsym "libc.so"
-#pragma dynimport libc·execve execve "libc.so"
-#pragma dynimport libc·fcntl fcntl "libc.so"
-#pragma dynimport libc·gethostname gethostname "libc.so"
-#pragma dynimport libc·ioctl ioctl "libc.so"
-#pragma dynimport libc·pipe pipe "libc.so"
-#pragma dynimport libc·setgid setgid "libc.so"
-#pragma dynimport libc·setgroups setgroups "libc.so"
-#pragma dynimport libc·setsid setsid "libc.so"
-#pragma dynimport libc·setuid setuid "libc.so"
-#pragma dynimport libc·setpgid setsid "libc.so"
-#pragma dynimport libc·syscall syscall "libc.so"
-#pragma dynimport libc·forkx forkx "libc.so"
-#pragma dynimport libc·wait4 wait4 "libc.so"
diff --git a/src/runtime/syscall_solaris.go b/src/runtime/syscall_solaris.go
index 50d3a1d36..9b9971674 100644
--- a/src/runtime/syscall_solaris.go
+++ b/src/runtime/syscall_solaris.go
@@ -9,12 +9,10 @@ import "unsafe"
var (
libc_chdir,
libc_chroot,
- libc_close,
libc_dlopen,
libc_dlclose,
libc_dlsym,
libc_execve,
- libc_exit,
libc_fcntl,
libc_forkx,
libc_gethostname,
@@ -27,7 +25,6 @@ var (
libc_setpgid,
libc_syscall,
libc_wait4,
- libc_write,
pipe1 libcFunc
)
@@ -38,9 +35,9 @@ func syscall_sysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err
n: nargs,
args: uintptr(unsafe.Pointer(&a1)),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
return call.r1, call.r2, call.err
}
@@ -62,7 +59,7 @@ func syscall_rawsysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, e
//go:nosplit
func syscall_chdir(path uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_chdir)),
+ fn: uintptr(unsafe.Pointer(libc_chdir)),
n: 1,
args: uintptr(unsafe.Pointer(&path)),
}
@@ -73,7 +70,7 @@ func syscall_chdir(path uintptr) (err uintptr) {
//go:nosplit
func syscall_chroot(path uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_chroot)),
+ fn: uintptr(unsafe.Pointer(libc_chroot)),
n: 1,
args: uintptr(unsafe.Pointer(&path)),
}
@@ -84,18 +81,18 @@ func syscall_chroot(path uintptr) (err uintptr) {
// like close, but must not split stack, for forkx.
//go:nosplit
func syscall_close(fd int32) int32 {
- return int32(sysvicall1(&libc_close, uintptr(fd)))
+ return int32(sysvicall1(libc_close, uintptr(fd)))
}
func syscall_dlopen(name *byte, mode uintptr) (handle uintptr, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_dlopen)),
+ fn: uintptr(unsafe.Pointer(libc_dlopen)),
n: 2,
args: uintptr(unsafe.Pointer(&name)),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
if call.r1 == 0 {
return call.r1, call.err
}
@@ -104,25 +101,25 @@ func syscall_dlopen(name *byte, mode uintptr) (handle uintptr, err uintptr) {
func syscall_dlclose(handle uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_dlclose)),
+ fn: uintptr(unsafe.Pointer(libc_dlclose)),
n: 1,
args: uintptr(unsafe.Pointer(&handle)),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
return call.r1
}
func syscall_dlsym(handle uintptr, name *byte) (proc uintptr, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_dlsym)),
+ fn: uintptr(unsafe.Pointer(libc_dlsym)),
n: 2,
args: uintptr(unsafe.Pointer(&handle)),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
if call.r1 == 0 {
return call.r1, call.err
}
@@ -132,7 +129,7 @@ func syscall_dlsym(handle uintptr, name *byte) (proc uintptr, err uintptr) {
//go:nosplit
func syscall_execve(path, argv, envp uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_execve)),
+ fn: uintptr(unsafe.Pointer(libc_execve)),
n: 3,
args: uintptr(unsafe.Pointer(&path)),
}
@@ -143,13 +140,13 @@ func syscall_execve(path, argv, envp uintptr) (err uintptr) {
// like exit, but must not split stack, for forkx.
//go:nosplit
func syscall_exit(code uintptr) {
- sysvicall1(&libc_exit, code)
+ sysvicall1(libc_exit, code)
}
//go:nosplit
func syscall_fcntl(fd, cmd, arg uintptr) (val, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_fcntl)),
+ fn: uintptr(unsafe.Pointer(libc_fcntl)),
n: 3,
args: uintptr(unsafe.Pointer(&fd)),
}
@@ -160,7 +157,7 @@ func syscall_fcntl(fd, cmd, arg uintptr) (val, err uintptr) {
//go:nosplit
func syscall_forkx(flags uintptr) (pid uintptr, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_forkx)),
+ fn: uintptr(unsafe.Pointer(libc_forkx)),
n: 1,
args: uintptr(unsafe.Pointer(&flags)),
}
@@ -172,13 +169,13 @@ func syscall_gethostname() (name string, err uintptr) {
cname := new([_MAXHOSTNAMELEN]byte)
var args = [2]uintptr{uintptr(unsafe.Pointer(&cname[0])), _MAXHOSTNAMELEN}
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_gethostname)),
+ fn: uintptr(unsafe.Pointer(libc_gethostname)),
n: 2,
args: uintptr(unsafe.Pointer(&args[0])),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
if call.r1 != 0 {
return "", call.err
}
@@ -189,7 +186,7 @@ func syscall_gethostname() (name string, err uintptr) {
//go:nosplit
func syscall_ioctl(fd, req, arg uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_ioctl)),
+ fn: uintptr(unsafe.Pointer(libc_ioctl)),
n: 3,
args: uintptr(unsafe.Pointer(&fd)),
}
@@ -203,9 +200,9 @@ func syscall_pipe() (r, w, err uintptr) {
n: 0,
args: uintptr(unsafe.Pointer(&pipe1)), // it's unused but must be non-nil, otherwise crashes
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
return call.r1, call.r2, call.err
}
@@ -217,7 +214,7 @@ func syscall_pipe() (r, w, err uintptr) {
// TODO(aram): make this panic once package net stops calling fcntl(2) through it.
func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_syscall)),
+ fn: uintptr(unsafe.Pointer(libc_syscall)),
n: 4,
args: uintptr(unsafe.Pointer(&trap)),
}
@@ -228,7 +225,7 @@ func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//go:nosplit
func syscall_setgid(gid uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_setgid)),
+ fn: uintptr(unsafe.Pointer(libc_setgid)),
n: 1,
args: uintptr(unsafe.Pointer(&gid)),
}
@@ -239,7 +236,7 @@ func syscall_setgid(gid uintptr) (err uintptr) {
//go:nosplit
func syscall_setgroups(ngid, gid uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_setgroups)),
+ fn: uintptr(unsafe.Pointer(libc_setgroups)),
n: 2,
args: uintptr(unsafe.Pointer(&ngid)),
}
@@ -250,9 +247,9 @@ func syscall_setgroups(ngid, gid uintptr) (err uintptr) {
//go:nosplit
func syscall_setsid() (pid, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_setsid)),
+ fn: uintptr(unsafe.Pointer(libc_setsid)),
n: 0,
- args: uintptr(unsafe.Pointer(&libc_setsid)), // it's unused but must be non-nil, otherwise crashes
+ args: uintptr(unsafe.Pointer(libc_setsid)), // it's unused but must be non-nil, otherwise crashes
}
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
return call.r1, call.err
@@ -261,7 +258,7 @@ func syscall_setsid() (pid, err uintptr) {
//go:nosplit
func syscall_setuid(uid uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_setuid)),
+ fn: uintptr(unsafe.Pointer(libc_setuid)),
n: 1,
args: uintptr(unsafe.Pointer(&uid)),
}
@@ -272,7 +269,7 @@ func syscall_setuid(uid uintptr) (err uintptr) {
//go:nosplit
func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_setpgid)),
+ fn: uintptr(unsafe.Pointer(libc_setpgid)),
n: 2,
args: uintptr(unsafe.Pointer(&pid)),
}
@@ -288,32 +285,32 @@ func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
// TODO(aram): make this panic once package net stops calling fcntl(2) through it.
func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_syscall)),
+ fn: uintptr(unsafe.Pointer(libc_syscall)),
n: 4,
args: uintptr(unsafe.Pointer(&trap)),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
return call.r1, call.r2, call.err
}
func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.Pointer) (wpid int, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_wait4)),
+ fn: uintptr(unsafe.Pointer(libc_wait4)),
n: 4,
args: uintptr(unsafe.Pointer(&pid)),
}
- entersyscallblock()
+ entersyscallblock(0)
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&call))
- exitsyscall()
+ exitsyscall(0)
return int(call.r1), call.err
}
//go:nosplit
func syscall_write(fd, buf, nbyte uintptr) (n, err uintptr) {
call := libcall{
- fn: uintptr(unsafe.Pointer(&libc_write)),
+ fn: uintptr(unsafe.Pointer(libc_write)),
n: 3,
args: uintptr(unsafe.Pointer(&fd)),
}
diff --git a/src/runtime/thunk.s b/src/runtime/thunk.s
index 1a5b65502..241dd90af 100644
--- a/src/runtime/thunk.s
+++ b/src/runtime/thunk.s
@@ -4,7 +4,6 @@
// This file exposes various internal runtime functions to other packages in std lib.
-#include "zasm_GOOS_GOARCH.h"
#include "textflag.h"
#ifdef GOARCH_arm
@@ -187,3 +186,18 @@ TEXT syscall·runtime_envs(SB),NOSPLIT,$0-0
TEXT os·runtime_args(SB),NOSPLIT,$0-0
JMP runtime·runtime_args(SB)
+
+TEXT sync·runtime_procUnpin(SB),NOSPLIT,$0-0
+ JMP runtime·sync_procUnpin(SB)
+
+TEXT sync·runtime_procPin(SB),NOSPLIT,$0-0
+ JMP runtime·sync_procPin(SB)
+
+TEXT syscall·runtime_BeforeFork(SB),NOSPLIT,$0-0
+ JMP runtime·syscall_BeforeFork(SB)
+
+TEXT syscall·runtime_AfterFork(SB),NOSPLIT,$0-0
+ JMP runtime·syscall_AfterFork(SB)
+
+TEXT reflect·typelinks(SB),NOSPLIT,$0-0
+ JMP runtime·typelinks(SB)
diff --git a/src/runtime/thunk_solaris_amd64.s b/src/runtime/thunk_solaris_amd64.s
deleted file mode 100644
index f61188c14..000000000
--- a/src/runtime/thunk_solaris_amd64.s
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file exposes various external library functions to Go code in the runtime.
-
-#include "zasm_GOOS_GOARCH.h"
-#include "textflag.h"
-
-TEXT runtime·libc_chdir(SB),NOSPLIT,$0
- MOVQ libc·chdir(SB), AX
- JMP AX
-
-TEXT runtime·libc_chroot(SB),NOSPLIT,$0
- MOVQ libc·chroot(SB), AX
- JMP AX
-
-TEXT runtime·libc_close(SB),NOSPLIT,$0
- MOVQ libc·close(SB), AX
- JMP AX
-
-TEXT runtime·libc_dlopen(SB),NOSPLIT,$0
- MOVQ libc·dlopen(SB), AX
- JMP AX
-
-TEXT runtime·libc_dlclose(SB),NOSPLIT,$0
- MOVQ libc·dlclose(SB), AX
- JMP AX
-
-TEXT runtime·libc_dlsym(SB),NOSPLIT,$0
- MOVQ libc·dlsym(SB), AX
- JMP AX
-
-TEXT runtime·libc_execve(SB),NOSPLIT,$0
- MOVQ libc·execve(SB), AX
- JMP AX
-
-TEXT runtime·libc_exit(SB),NOSPLIT,$0
- MOVQ libc·exit(SB), AX
- JMP AX
-
-TEXT runtime·libc_fcntl(SB),NOSPLIT,$0
- MOVQ libc·fcntl(SB), AX
- JMP AX
-
-TEXT runtime·libc_forkx(SB),NOSPLIT,$0
- MOVQ libc·forkx(SB), AX
- JMP AX
-
-TEXT runtime·libc_gethostname(SB),NOSPLIT,$0
- MOVQ libc·gethostname(SB), AX
- JMP AX
-
-TEXT runtime·libc_ioctl(SB),NOSPLIT,$0
- MOVQ libc·ioctl(SB), AX
- JMP AX
-
-TEXT runtime·libc_setgid(SB),NOSPLIT,$0
- MOVQ libc·setgid(SB), AX
- JMP AX
-
-TEXT runtime·libc_setgroups(SB),NOSPLIT,$0
- MOVQ libc·setgroups(SB), AX
- JMP AX
-
-TEXT runtime·libc_setsid(SB),NOSPLIT,$0
- MOVQ libc·setsid(SB), AX
- JMP AX
-
-TEXT runtime·libc_setuid(SB),NOSPLIT,$0
- MOVQ libc·setuid(SB), AX
- JMP AX
-
-TEXT runtime·libc_setpgid(SB),NOSPLIT,$0
- MOVQ libc·setpgid(SB), AX
- JMP AX
-
-TEXT runtime·libc_syscall(SB),NOSPLIT,$0
- MOVQ libc·syscall(SB), AX
- JMP AX
-
-TEXT runtime·libc_wait4(SB),NOSPLIT,$0
- MOVQ libc·wait4(SB), AX
- JMP AX
-
-TEXT runtime·libc_write(SB),NOSPLIT,$0
- MOVQ libc·write(SB), AX
- JMP AX
diff --git a/src/runtime/thunk_windows.s b/src/runtime/thunk_windows.s
index 7ccb98fd4..b1d5d5753 100644
--- a/src/runtime/thunk_windows.s
+++ b/src/runtime/thunk_windows.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
TEXT syscall·Syscall(SB),NOSPLIT,$0-0
diff --git a/src/runtime/tls_arm.s b/src/runtime/tls_arm.s
index 85c3940bf..7c5c0e215 100644
--- a/src/runtime/tls_arm.s
+++ b/src/runtime/tls_arm.s
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 1c6ce6e64..e1cc9123f 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -32,15 +32,16 @@ const usesLR = GOARCH != "amd64" && GOARCH != "amd64p32" && GOARCH != "386"
var (
// initialized in tracebackinit
- deferprocPC uintptr
- goexitPC uintptr
- jmpdeferPC uintptr
- mcallPC uintptr
- morestackPC uintptr
- mstartPC uintptr
- newprocPC uintptr
- rt0_goPC uintptr
- sigpanicPC uintptr
+ deferprocPC uintptr
+ goexitPC uintptr
+ jmpdeferPC uintptr
+ mcallPC uintptr
+ morestackPC uintptr
+ mstartPC uintptr
+ newprocPC uintptr
+ rt0_goPC uintptr
+ sigpanicPC uintptr
+ systemstack_switchPC uintptr
externalthreadhandlerp uintptr // initialized elsewhere
)
@@ -59,6 +60,7 @@ func tracebackinit() {
newprocPC = funcPC(newproc)
rt0_goPC = funcPC(rt0_go)
sigpanicPC = funcPC(sigpanic)
+ systemstack_switchPC = funcPC(systemstack_switch)
}
// Traceback over the deferred function calls.
@@ -335,8 +337,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
print(hex(argp[i]))
}
print(")\n")
- var file string
- line := funcline(f, tracepc, &file)
+ file, line := funcline(f, tracepc)
print("\t", file, ":", line)
if frame.pc > f.entry {
print(" +", hex(frame.pc-f.entry))
@@ -480,8 +481,7 @@ func printcreatedby(gp *g) {
if pc > f.entry {
tracepc -= _PCQuantum
}
- var file string
- line := funcline(f, tracepc, &file)
+ file, line := funcline(f, tracepc)
print("\t", file, ":", line)
if pc > f.entry {
print(" +", hex(pc-f.entry))
@@ -528,7 +528,7 @@ func callers(skip int, pcbuf *uintptr, m int) int {
sp := getcallersp(unsafe.Pointer(&skip))
pc := uintptr(getcallerpc(unsafe.Pointer(&skip)))
var n int
- onM(func() {
+ systemstack(func() {
n = gentraceback(pc, sp, 0, getg(), skip, pcbuf, m, nil, nil, 0)
})
return n
diff --git a/src/runtime/type.go b/src/runtime/type.go
new file mode 100644
index 000000000..cbd5c9ebc
--- /dev/null
+++ b/src/runtime/type.go
@@ -0,0 +1,99 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Runtime _type representation.
+
+package runtime
+
+import "unsafe"
+
+// Needs to be in sync with ../../cmd/ld/decodesym.c:/^commonsize and pkg/reflect/type.go:/type.
+type _type struct {
+ size uintptr
+ hash uint32
+ _unused uint8
+ align uint8
+ fieldalign uint8
+ kind uint8
+ alg unsafe.Pointer
+ // gc stores _type info required for garbage collector.
+ // If (kind&KindGCProg)==0, then gc[0] points at sparse GC bitmap
+ // (no indirection), 4 bits per word.
+ // If (kind&KindGCProg)!=0, then gc[1] points to a compiler-generated
+ // read-only GC program; and gc[0] points to BSS space for sparse GC bitmap.
+ // For huge _types (>MaxGCMask), runtime unrolls the program directly into
+ // GC bitmap and gc[0] is not used. For moderately-sized _types, runtime
+ // unrolls the program into gc[0] space on first use. The first byte of gc[0]
+ // (gc[0][0]) contains 'unroll' flag saying whether the program is already
+ // unrolled into gc[0] or not.
+ gc [2]uintptr
+ _string *string
+ x *uncommontype
+ ptrto *_type
+ zero *byte // ptr to the zero value for this _type
+}
+
+type method struct {
+ name *string
+ pkgpath *string
+ mtyp *_type
+ typ *_type
+ ifn unsafe.Pointer
+ tfn unsafe.Pointer
+}
+
+type uncommontype struct {
+ name *string
+ pkgpath *string
+ mhdr []method
+ m [0]method
+}
+
+type imethod struct {
+ name *string
+ pkgpath *string
+ _type *_type
+}
+
+type interfacetype struct {
+ typ _type
+ mhdr []imethod
+ m [0]imethod
+}
+
+type maptype struct {
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal _type representing a hash bucket
+ hmap *_type // internal _type representing a hmap
+ keysize uint8 // size of key slot
+ indirectkey bool // store ptr to key instead of key itself
+ valuesize uint8 // size of value slot
+ indirectvalue bool // store ptr to value instead of value itself
+ bucketsize uint16 // size of bucket
+}
+
+type chantype struct {
+ typ _type
+ elem *_type
+ dir uintptr
+}
+
+type slicetype struct {
+ typ _type
+ elem *_type
+}
+
+type functype struct {
+ typ _type
+ dotdotdot bool
+ in slice
+ out slice
+}
+
+type ptrtype struct {
+ typ _type
+ elem *_type
+}
diff --git a/src/runtime/type.h b/src/runtime/type.h
deleted file mode 100644
index f5b4f9d13..000000000
--- a/src/runtime/type.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Runtime type representation.
-
-typedef struct Type Type;
-typedef struct UncommonType UncommonType;
-typedef struct InterfaceType InterfaceType;
-typedef struct Method Method;
-typedef struct IMethod IMethod;
-typedef struct SliceType SliceType;
-typedef struct FuncType FuncType;
-
-// Needs to be in sync with ../../cmd/ld/decodesym.c:/^commonsize and pkg/reflect/type.go:/type.
-struct Type
-{
- uintptr size;
- uint32 hash;
- uint8 _unused;
- uint8 align;
- uint8 fieldAlign;
- uint8 kind;
- void* alg;
- // gc stores type info required for garbage collector.
- // If (kind&KindGCProg)==0, then gc[0] points at sparse GC bitmap
- // (no indirection), 4 bits per word.
- // If (kind&KindGCProg)!=0, then gc[1] points to a compiler-generated
- // read-only GC program; and gc[0] points to BSS space for sparse GC bitmap.
- // For huge types (>MaxGCMask), runtime unrolls the program directly into
- // GC bitmap and gc[0] is not used. For moderately-sized types, runtime
- // unrolls the program into gc[0] space on first use. The first byte of gc[0]
- // (gc[0][0]) contains 'unroll' flag saying whether the program is already
- // unrolled into gc[0] or not.
- uintptr gc[2];
- String *string;
- UncommonType *x;
- Type *ptrto;
- byte *zero; // ptr to the zero value for this type
-};
-
-struct Method
-{
- String *name;
- String *pkgPath;
- Type *mtyp;
- Type *typ;
- void (*ifn)(void);
- void (*tfn)(void);
-};
-
-struct UncommonType
-{
- String *name;
- String *pkgPath;
- Slice mhdr;
- Method m[];
-};
-
-struct IMethod
-{
- String *name;
- String *pkgPath;
- Type *type;
-};
-
-struct InterfaceType
-{
- Type typ;
- Slice mhdr;
- IMethod m[];
-};
-
-struct MapType
-{
- Type typ;
- Type *key;
- Type *elem;
- Type *bucket; // internal type representing a hash bucket
- Type *hmap; // internal type representing a Hmap
- uint8 keysize; // size of key slot
- bool indirectkey; // store ptr to key instead of key itself
- uint8 valuesize; // size of value slot
- bool indirectvalue; // store ptr to value instead of value itself
- uint16 bucketsize; // size of bucket
-};
-
-struct ChanType
-{
- Type typ;
- Type *elem;
- uintptr dir;
-};
-
-struct SliceType
-{
- Type typ;
- Type *elem;
-};
-
-struct FuncType
-{
- Type typ;
- bool dotdotdot;
- Slice in;
- Slice out;
-};
-
-struct PtrType
-{
- Type typ;
- Type *elem;
-};
diff --git a/src/runtime/typekind.h b/src/runtime/typekind.h
index e0fe177bb..39cd45c2e 100644
--- a/src/runtime/typekind.h
+++ b/src/runtime/typekind.h
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Must match runtime and reflect.
+// Included by cmd/gc.
+
enum {
KindBool = 1,
KindInt,
@@ -30,9 +33,8 @@ enum {
KindStruct,
KindUnsafePointer,
- KindDirectIface = 1<<5,
- KindGCProg = 1<<6, // Type.gc points to GC program
- KindNoPointers = 1<<7,
- KindMask = (1<<5)-1,
+ KindDirectIface = 1 << 5,
+ KindGCProg = 1 << 6, // Type.gc points to GC program
+ KindNoPointers = 1 << 7,
+ KindMask = (1 << 5) - 1,
};
-
diff --git a/src/runtime/typekind1.go b/src/runtime/typekind1.go
new file mode 100644
index 000000000..73028d6f4
--- /dev/null
+++ b/src/runtime/typekind1.go
@@ -0,0 +1,39 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _KindBool = 1 + iota
+ _KindInt
+ _KindInt8
+ _KindInt16
+ _KindInt32
+ _KindInt64
+ _KindUint
+ _KindUint8
+ _KindUint16
+ _KindUint32
+ _KindUint64
+ _KindUintptr
+ _KindFloat32
+ _KindFloat64
+ _KindComplex64
+ _KindComplex128
+ _KindArray
+ _KindChan
+ _KindFunc
+ _KindInterface
+ _KindMap
+ _KindPtr
+ _KindSlice
+ _KindString
+ _KindStruct
+ _KindUnsafePointer
+
+ _KindDirectIface = 1 << 5
+ _KindGCProg = 1 << 6 // Type.gc points to GC program
+ _KindNoPointers = 1 << 7
+ _KindMask = (1 << 5) - 1
+)
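
The kind byte packs a base kind into its low five bits and flag bits above them, as the constants spell out. A small self-contained example of decoding it; the kind value used here is made up for illustration:

    package main

    import "fmt"

    const (
        kindGCProg     = 1 << 6       // Type.gc points to GC program
        kindNoPointers = 1 << 7
        kindMask       = (1 << 5) - 1 // low five bits hold the base kind
    )

    func main() {
        // Hypothetical kind byte: _KindStruct (25) with a compiler-generated GC program.
        var kind uint8 = 25 | kindGCProg
        fmt.Println("base kind:     ", kind&kindMask)            // 25
        fmt.Println("has GC program:", kind&kindGCProg != 0)     // true
        fmt.Println("no pointers:   ", kind&kindNoPointers != 0) // false
    }
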
diff --git a/src/runtime/vdso_linux_amd64.c b/src/runtime/vdso_linux_amd64.c
deleted file mode 100644
index 681340c5b..000000000
--- a/src/runtime/vdso_linux_amd64.c
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "textflag.h"
-
-// Look up symbols in the Linux vDSO.
-
-// This code was originally based on the sample Linux vDSO parser at
-// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/vDSO/parse_vdso.c
-
-// This implements the ELF dynamic linking spec at
-// http://sco.com/developers/gabi/latest/ch5.dynamic.html
-
-// The version section is documented at
-// http://refspecs.linuxfoundation.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/symversion.html
-
-#define AT_RANDOM 25
-#define AT_SYSINFO_EHDR 33
-#define AT_NULL 0 /* End of vector */
-#define PT_LOAD 1 /* Loadable program segment */
-#define PT_DYNAMIC 2 /* Dynamic linking information */
-#define DT_NULL 0 /* Marks end of dynamic section */
-#define DT_HASH 4 /* Dynamic symbol hash table */
-#define DT_STRTAB 5 /* Address of string table */
-#define DT_SYMTAB 6 /* Address of symbol table */
-#define DT_VERSYM 0x6ffffff0
-#define DT_VERDEF 0x6ffffffc
-
-#define VER_FLG_BASE 0x1 /* Version definition of file itself */
-#define SHN_UNDEF 0 /* Undefined section */
-#define SHT_DYNSYM 11 /* Dynamic linker symbol table */
-#define STT_FUNC 2 /* Symbol is a code object */
-#define STB_GLOBAL 1 /* Global symbol */
-#define STB_WEAK 2 /* Weak symbol */
-
-/* How to extract and insert information held in the st_info field. */
-#define ELF64_ST_BIND(val) (((byte) (val)) >> 4)
-#define ELF64_ST_TYPE(val) ((val) & 0xf)
-
-#define EI_NIDENT (16)
-
-typedef uint16 Elf64_Half;
-typedef uint32 Elf64_Word;
-typedef int32 Elf64_Sword;
-typedef uint64 Elf64_Xword;
-typedef int64 Elf64_Sxword;
-typedef uint64 Elf64_Addr;
-typedef uint64 Elf64_Off;
-typedef uint16 Elf64_Section;
-typedef Elf64_Half Elf64_Versym;
-
-
-typedef struct Elf64_Sym
-{
- Elf64_Word st_name;
- byte st_info;
- byte st_other;
- Elf64_Section st_shndx;
- Elf64_Addr st_value;
- Elf64_Xword st_size;
-} Elf64_Sym;
-
-typedef struct Elf64_Verdef
-{
- Elf64_Half vd_version; /* Version revision */
- Elf64_Half vd_flags; /* Version information */
- Elf64_Half vd_ndx; /* Version Index */
- Elf64_Half vd_cnt; /* Number of associated aux entries */
- Elf64_Word vd_hash; /* Version name hash value */
- Elf64_Word vd_aux; /* Offset in bytes to verdaux array */
- Elf64_Word vd_next; /* Offset in bytes to next verdef entry */
-} Elf64_Verdef;
-
-typedef struct Elf64_Ehdr
-{
- byte e_ident[EI_NIDENT]; /* Magic number and other info */
- Elf64_Half e_type; /* Object file type */
- Elf64_Half e_machine; /* Architecture */
- Elf64_Word e_version; /* Object file version */
- Elf64_Addr e_entry; /* Entry point virtual address */
- Elf64_Off e_phoff; /* Program header table file offset */
- Elf64_Off e_shoff; /* Section header table file offset */
- Elf64_Word e_flags; /* Processor-specific flags */
- Elf64_Half e_ehsize; /* ELF header size in bytes */
- Elf64_Half e_phentsize; /* Program header table entry size */
- Elf64_Half e_phnum; /* Program header table entry count */
- Elf64_Half e_shentsize; /* Section header table entry size */
- Elf64_Half e_shnum; /* Section header table entry count */
- Elf64_Half e_shstrndx; /* Section header string table index */
-} Elf64_Ehdr;
-
-typedef struct Elf64_Phdr
-{
- Elf64_Word p_type; /* Segment type */
- Elf64_Word p_flags; /* Segment flags */
- Elf64_Off p_offset; /* Segment file offset */
- Elf64_Addr p_vaddr; /* Segment virtual address */
- Elf64_Addr p_paddr; /* Segment physical address */
- Elf64_Xword p_filesz; /* Segment size in file */
- Elf64_Xword p_memsz; /* Segment size in memory */
- Elf64_Xword p_align; /* Segment alignment */
-} Elf64_Phdr;
-
-typedef struct Elf64_Shdr
-{
- Elf64_Word sh_name; /* Section name (string tbl index) */
- Elf64_Word sh_type; /* Section type */
- Elf64_Xword sh_flags; /* Section flags */
- Elf64_Addr sh_addr; /* Section virtual addr at execution */
- Elf64_Off sh_offset; /* Section file offset */
- Elf64_Xword sh_size; /* Section size in bytes */
- Elf64_Word sh_link; /* Link to another section */
- Elf64_Word sh_info; /* Additional section information */
- Elf64_Xword sh_addralign; /* Section alignment */
- Elf64_Xword sh_entsize; /* Entry size if section holds table */
-} Elf64_Shdr;
-
-typedef struct Elf64_Dyn
-{
- Elf64_Sxword d_tag; /* Dynamic entry type */
- union
- {
- Elf64_Xword d_val; /* Integer value */
- Elf64_Addr d_ptr; /* Address value */
- } d_un;
-} Elf64_Dyn;
-
-typedef struct Elf64_Verdaux
-{
- Elf64_Word vda_name; /* Version or dependency names */
- Elf64_Word vda_next; /* Offset in bytes to next verdaux entry */
-} Elf64_Verdaux;
-
-typedef struct Elf64_auxv_t
-{
- uint64 a_type; /* Entry type */
- union
- {
- uint64 a_val; /* Integer value */
- } a_un;
-} Elf64_auxv_t;
-
-
-typedef struct symbol_key {
- byte* name;
- int32 sym_hash;
- void** var_ptr;
-} symbol_key;
-
-typedef struct version_key {
- byte* version;
- int32 ver_hash;
-} version_key;
-
-struct vdso_info {
- bool valid;
-
- /* Load information */
- uintptr load_addr;
- uintptr load_offset; /* load_addr - recorded vaddr */
-
- /* Symbol table */
- Elf64_Sym *symtab;
- const byte *symstrings;
- Elf64_Word *bucket, *chain;
- Elf64_Word nbucket, nchain;
-
- /* Version table */
- Elf64_Versym *versym;
- Elf64_Verdef *verdef;
-};
-
-#pragma dataflag NOPTR
-static version_key linux26 = { (byte*)"LINUX_2.6", 0x3ae75f6 };
-
-// initialize with vsyscall fallbacks
-#pragma dataflag NOPTR
-void* runtime·__vdso_time_sym = (void*)0xffffffffff600400ULL;
-#pragma dataflag NOPTR
-void* runtime·__vdso_gettimeofday_sym = (void*)0xffffffffff600000ULL;
-#pragma dataflag NOPTR
-void* runtime·__vdso_clock_gettime_sym = (void*)0;
-
-#pragma dataflag NOPTR
-static symbol_key sym_keys[] = {
- { (byte*)"__vdso_time", 0xa33c485, &runtime·__vdso_time_sym },
- { (byte*)"__vdso_gettimeofday", 0x315ca59, &runtime·__vdso_gettimeofday_sym },
- { (byte*)"__vdso_clock_gettime", 0xd35ec75, &runtime·__vdso_clock_gettime_sym },
-};
-
-static void
-vdso_init_from_sysinfo_ehdr(struct vdso_info *vdso_info, Elf64_Ehdr* hdr)
-{
- uint64 i;
- bool found_vaddr = false;
- Elf64_Phdr *pt;
- Elf64_Dyn *dyn;
- Elf64_Word *hash;
-
- vdso_info->valid = false;
- vdso_info->load_addr = (uintptr) hdr;
-
- pt = (Elf64_Phdr*)(vdso_info->load_addr + hdr->e_phoff);
- dyn = nil;
-
- // We need two things from the segment table: the load offset
- // and the dynamic table.
- for(i=0; i<hdr->e_phnum; i++) {
- if(pt[i].p_type == PT_LOAD && found_vaddr == false) {
- found_vaddr = true;
- vdso_info->load_offset = (uintptr)hdr
- + (uintptr)pt[i].p_offset
- - (uintptr)pt[i].p_vaddr;
- } else if(pt[i].p_type == PT_DYNAMIC) {
- dyn = (Elf64_Dyn*)((uintptr)hdr + pt[i].p_offset);
- }
- }
-
- if(found_vaddr == false || dyn == nil)
- return; // Failed
-
- // Fish out the useful bits of the dynamic table.
- hash = nil;
- vdso_info->symstrings = nil;
- vdso_info->symtab = nil;
- vdso_info->versym = nil;
- vdso_info->verdef = nil;
- for(i=0; dyn[i].d_tag!=DT_NULL; i++) {
- switch(dyn[i].d_tag) {
- case DT_STRTAB:
- vdso_info->symstrings = (const byte *)
- ((uintptr)dyn[i].d_un.d_ptr
- + vdso_info->load_offset);
- break;
- case DT_SYMTAB:
- vdso_info->symtab = (Elf64_Sym *)
- ((uintptr)dyn[i].d_un.d_ptr
- + vdso_info->load_offset);
- break;
- case DT_HASH:
- hash = (Elf64_Word *)
- ((uintptr)dyn[i].d_un.d_ptr
- + vdso_info->load_offset);
- break;
- case DT_VERSYM:
- vdso_info->versym = (Elf64_Versym *)
- ((uintptr)dyn[i].d_un.d_ptr
- + vdso_info->load_offset);
- break;
- case DT_VERDEF:
- vdso_info->verdef = (Elf64_Verdef *)
- ((uintptr)dyn[i].d_un.d_ptr
- + vdso_info->load_offset);
- break;
- }
- }
- if(vdso_info->symstrings == nil || vdso_info->symtab == nil || hash == nil)
- return; // Failed
-
- if(vdso_info->verdef == nil)
- vdso_info->versym = 0;
-
- // Parse the hash table header.
- vdso_info->nbucket = hash[0];
- vdso_info->nchain = hash[1];
- vdso_info->bucket = &hash[2];
- vdso_info->chain = &hash[vdso_info->nbucket + 2];
-
- // That's all we need.
- vdso_info->valid = true;
-}
-
-static int32
-vdso_find_version(struct vdso_info *vdso_info, version_key* ver)
-{
- if(vdso_info->valid == false) {
- return 0;
- }
- Elf64_Verdef *def = vdso_info->verdef;
- while(true) {
- if((def->vd_flags & VER_FLG_BASE) == 0) {
- Elf64_Verdaux *aux = (Elf64_Verdaux*)((byte *)def + def->vd_aux);
- if(def->vd_hash == ver->ver_hash &&
- runtime·strcmp(ver->version, vdso_info->symstrings + aux->vda_name) == 0) {
- return def->vd_ndx & 0x7fff;
- }
- }
-
- if(def->vd_next == 0) {
- break;
- }
- def = (Elf64_Verdef *)((byte *)def + def->vd_next);
- }
- return -1; // can not match any version
-}
-
-static void
-vdso_parse_symbols(struct vdso_info *vdso_info, int32 version)
-{
- int32 i;
- Elf64_Word chain;
- Elf64_Sym *sym;
-
- if(vdso_info->valid == false)
- return;
-
- for(i=0; i<nelem(sym_keys); i++) {
- for(chain = vdso_info->bucket[sym_keys[i].sym_hash % vdso_info->nbucket];
- chain != 0; chain = vdso_info->chain[chain]) {
-
- sym = &vdso_info->symtab[chain];
- if(ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
- continue;
- if(ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
- ELF64_ST_BIND(sym->st_info) != STB_WEAK)
- continue;
- if(sym->st_shndx == SHN_UNDEF)
- continue;
- if(runtime·strcmp(sym_keys[i].name, vdso_info->symstrings + sym->st_name) != 0)
- continue;
-
- // Check symbol version.
- if(vdso_info->versym != nil && version != 0
- && vdso_info->versym[chain] & 0x7fff != version)
- continue;
-
- *sym_keys[i].var_ptr = (void *)(vdso_info->load_offset + sym->st_value);
- break;
- }
- }
-}
-
-static void
-runtime·linux_setup_vdso(int32 argc, uint8** argv)
-{
- struct vdso_info vdso_info;
-
- // skip argvc
- byte **p = argv;
- p = &p[argc+1];
-
- // skip envp to get to ELF auxiliary vector.
- for(; *p!=0; p++) {}
-
- // skip NULL separator
- p++;
-
- // now, p points to auxv
- Elf64_auxv_t *elf_auxv = (Elf64_auxv_t*) p;
-
- for(int32 i=0; elf_auxv[i].a_type!=AT_NULL; i++) {
- if(elf_auxv[i].a_type == AT_SYSINFO_EHDR) {
- if(elf_auxv[i].a_un.a_val == 0) {
- // Something went wrong
- continue;
- }
- vdso_init_from_sysinfo_ehdr(&vdso_info, (Elf64_Ehdr*)elf_auxv[i].a_un.a_val);
- vdso_parse_symbols(&vdso_info, vdso_find_version(&vdso_info, &linux26));
- continue;
- }
- if(elf_auxv[i].a_type == AT_RANDOM) {
- runtime·startup_random_data = (byte*)elf_auxv[i].a_un.a_val;
- runtime·startup_random_data_len = 16;
- continue;
- }
- }
-}
-
-void (*runtime·sysargs)(int32, uint8**) = runtime·linux_setup_vdso;
diff --git a/src/runtime/vdso_linux_amd64.go b/src/runtime/vdso_linux_amd64.go
new file mode 100644
index 000000000..7eb698811
--- /dev/null
+++ b/src/runtime/vdso_linux_amd64.go
@@ -0,0 +1,328 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Look up symbols in the Linux vDSO.
+
+// This code was originally based on the sample Linux vDSO parser at
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/vDSO/parse_vdso.c
+
+// This implements the ELF dynamic linking spec at
+// http://sco.com/developers/gabi/latest/ch5.dynamic.html
+
+// The version section is documented at
+// http://refspecs.linuxfoundation.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/symversion.html
+
+const (
+ _AT_RANDOM = 25
+ _AT_SYSINFO_EHDR = 33
+ _AT_NULL = 0 /* End of vector */
+
+ _PT_LOAD = 1 /* Loadable program segment */
+ _PT_DYNAMIC = 2 /* Dynamic linking information */
+
+ _DT_NULL = 0 /* Marks end of dynamic section */
+ _DT_HASH = 4 /* Dynamic symbol hash table */
+ _DT_STRTAB = 5 /* Address of string table */
+ _DT_SYMTAB = 6 /* Address of symbol table */
+ _DT_VERSYM = 0x6ffffff0
+ _DT_VERDEF = 0x6ffffffc
+
+ _VER_FLG_BASE = 0x1 /* Version definition of file itself */
+
+ _SHN_UNDEF = 0 /* Undefined section */
+
+ _SHT_DYNSYM = 11 /* Dynamic linker symbol table */
+
+ _STT_FUNC = 2 /* Symbol is a code object */
+
+ _STB_GLOBAL = 1 /* Global symbol */
+ _STB_WEAK = 2 /* Weak symbol */
+
+ _EI_NIDENT = 16
+)
+
+/* How to extract and insert information held in the st_info field. */
+func _ELF64_ST_BIND(val byte) byte { return val >> 4 }
+func _ELF64_ST_TYPE(val byte) byte { return val & 0xf }
+
+type elf64Sym struct {
+ st_name uint32
+ st_info byte
+ st_other byte
+ st_shndx uint16
+ st_value uint64
+ st_size uint64
+}
+
+type elf64Verdef struct {
+ vd_version uint16 /* Version revision */
+ vd_flags uint16 /* Version information */
+ vd_ndx uint16 /* Version Index */
+ vd_cnt uint16 /* Number of associated aux entries */
+ vd_hash uint32 /* Version name hash value */
+ vd_aux uint32 /* Offset in bytes to verdaux array */
+ vd_next uint32 /* Offset in bytes to next verdef entry */
+}
+
+type elf64Ehdr struct {
+ e_ident [_EI_NIDENT]byte /* Magic number and other info */
+ e_type uint16 /* Object file type */
+ e_machine uint16 /* Architecture */
+ e_version uint32 /* Object file version */
+ e_entry uint64 /* Entry point virtual address */
+ e_phoff uint64 /* Program header table file offset */
+ e_shoff uint64 /* Section header table file offset */
+ e_flags uint32 /* Processor-specific flags */
+ e_ehsize uint16 /* ELF header size in bytes */
+ e_phentsize uint16 /* Program header table entry size */
+ e_phnum uint16 /* Program header table entry count */
+ e_shentsize uint16 /* Section header table entry size */
+ e_shnum uint16 /* Section header table entry count */
+ e_shstrndx uint16 /* Section header string table index */
+}
+
+type elf64Phdr struct {
+ p_type uint32 /* Segment type */
+ p_flags uint32 /* Segment flags */
+ p_offset uint64 /* Segment file offset */
+ p_vaddr uint64 /* Segment virtual address */
+ p_paddr uint64 /* Segment physical address */
+ p_filesz uint64 /* Segment size in file */
+ p_memsz uint64 /* Segment size in memory */
+ p_align uint64 /* Segment alignment */
+}
+
+type elf64Shdr struct {
+ sh_name uint32 /* Section name (string tbl index) */
+ sh_type uint32 /* Section type */
+ sh_flags uint64 /* Section flags */
+ sh_addr uint64 /* Section virtual addr at execution */
+ sh_offset uint64 /* Section file offset */
+ sh_size uint64 /* Section size in bytes */
+ sh_link uint32 /* Link to another section */
+ sh_info uint32 /* Additional section information */
+ sh_addralign uint64 /* Section alignment */
+ sh_entsize uint64 /* Entry size if section holds table */
+}
+
+type elf64Dyn struct {
+ d_tag int64 /* Dynamic entry type */
+ d_val uint64 /* Integer value */
+}
+
+type elf64Verdaux struct {
+ vda_name uint32 /* Version or dependency names */
+ vda_next uint32 /* Offset in bytes to next verdaux entry */
+}
+
+type elf64Auxv struct {
+ a_type uint64 /* Entry type */
+ a_val uint64 /* Integer value */
+}
+
+type symbol_key struct {
+ name string
+ sym_hash uint32
+ ptr *uintptr
+}
+
+type version_key struct {
+ version string
+ ver_hash uint32
+}
+
+type vdso_info struct {
+ valid bool
+
+ /* Load information */
+ load_addr uintptr
+ load_offset uintptr /* load_addr - recorded vaddr */
+
+ /* Symbol table */
+ symtab *[1 << 32]elf64Sym
+ symstrings *[1 << 32]byte
+ chain []uint32
+ bucket []uint32
+
+ /* Version table */
+ versym *[1 << 32]uint16
+ verdef *elf64Verdef
+}
+
+var linux26 = version_key{"LINUX_2.6", 0x3ae75f6}
+
+var sym_keys = []symbol_key{
+ {"__vdso_time", 0xa33c485, &__vdso_time_sym},
+ {"__vdso_gettimeofday", 0x315ca59, &__vdso_gettimeofday_sym},
+ {"__vdso_clock_gettime", 0xd35ec75, &__vdso_clock_gettime_sym},
+}
+
+// initialize with vsyscall fallbacks
+var (
+ __vdso_time_sym uintptr = 0xffffffffff600400
+ __vdso_gettimeofday_sym uintptr = 0xffffffffff600000
+ __vdso_clock_gettime_sym uintptr = 0
+)
+
+func vdso_init_from_sysinfo_ehdr(info *vdso_info, hdr *elf64Ehdr) {
+ info.valid = false
+ info.load_addr = uintptr(unsafe.Pointer(hdr))
+
+ pt := unsafe.Pointer(info.load_addr + uintptr(hdr.e_phoff))
+
+ // We need two things from the segment table: the load offset
+ // and the dynamic table.
+ var found_vaddr bool
+ var dyn *[1 << 20]elf64Dyn
+ for i := uint16(0); i < hdr.e_phnum; i++ {
+ pt := (*elf64Phdr)(add(pt, uintptr(i)*unsafe.Sizeof(elf64Phdr{})))
+ switch pt.p_type {
+ case _PT_LOAD:
+ if !found_vaddr {
+ found_vaddr = true
+ info.load_offset = info.load_addr + uintptr(pt.p_offset-pt.p_vaddr)
+ }
+
+ case _PT_DYNAMIC:
+ dyn = (*[1 << 20]elf64Dyn)(unsafe.Pointer(info.load_addr + uintptr(pt.p_offset)))
+ }
+ }
+
+ if !found_vaddr || dyn == nil {
+ return // Failed
+ }
+
+ // Fish out the useful bits of the dynamic table.
+
+ var hash *[1 << 30]uint32
+ hash = nil
+ info.symstrings = nil
+ info.symtab = nil
+ info.versym = nil
+ info.verdef = nil
+ for i := 0; dyn[i].d_tag != _DT_NULL; i++ {
+ dt := &dyn[i]
+ p := info.load_offset + uintptr(dt.d_val)
+ switch dt.d_tag {
+ case _DT_STRTAB:
+ info.symstrings = (*[1 << 32]byte)(unsafe.Pointer(p))
+ case _DT_SYMTAB:
+ info.symtab = (*[1 << 32]elf64Sym)(unsafe.Pointer(p))
+ case _DT_HASH:
+ hash = (*[1 << 30]uint32)(unsafe.Pointer(p))
+ case _DT_VERSYM:
+ info.versym = (*[1 << 32]uint16)(unsafe.Pointer(p))
+ case _DT_VERDEF:
+ info.verdef = (*elf64Verdef)(unsafe.Pointer(p))
+ }
+ }
+
+ if info.symstrings == nil || info.symtab == nil || hash == nil {
+ return // Failed
+ }
+
+ if info.verdef == nil {
+ info.versym = nil
+ }
+
+ // Parse the hash table header.
+ nbucket := hash[0]
+ nchain := hash[1]
+ info.bucket = hash[2 : 2+nbucket]
+ info.chain = hash[2+nbucket : 2+nbucket+nchain]
+
+ // That's all we need.
+ info.valid = true
+}
+
+func vdso_find_version(info *vdso_info, ver *version_key) int32 {
+ if !info.valid {
+ return 0
+ }
+
+ def := info.verdef
+ for {
+ if def.vd_flags&_VER_FLG_BASE == 0 {
+ aux := (*elf64Verdaux)(add(unsafe.Pointer(def), uintptr(def.vd_aux)))
+ if def.vd_hash == ver.ver_hash && ver.version == gostringnocopy(&info.symstrings[aux.vda_name]) {
+ return int32(def.vd_ndx & 0x7fff)
+ }
+ }
+
+ if def.vd_next == 0 {
+ break
+ }
+ def = (*elf64Verdef)(add(unsafe.Pointer(def), uintptr(def.vd_next)))
+ }
+
+ return -1 // cannot match any version
+}
+
+func vdso_parse_symbols(info *vdso_info, version int32) {
+ if !info.valid {
+ return
+ }
+
+ for _, k := range sym_keys {
+ for chain := info.bucket[k.sym_hash%uint32(len(info.bucket))]; chain != 0; chain = info.chain[chain] {
+ sym := &info.symtab[chain]
+ typ := _ELF64_ST_TYPE(sym.st_info)
+ bind := _ELF64_ST_BIND(sym.st_info)
+ if typ != _STT_FUNC || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF {
+ continue
+ }
+ if k.name != gostringnocopy(&info.symstrings[sym.st_name]) {
+ continue
+ }
+
+ // Check symbol version.
+ if info.versym != nil && version != 0 && int32(info.versym[chain]&0x7fff) != version {
+ continue
+ }
+
+ *k.ptr = info.load_offset + uintptr(sym.st_value)
+ break
+ }
+ }
+}
+
+func sysargs(argc int32, argv **byte) {
+ n := argc + 1
+
+ // skip envp to get to ELF auxiliary vector.
+ for argv_index(argv, n) != nil {
+ n++
+ }
+
+ // skip NULL separator
+ n++
+
+ // now argv+n is auxv
+ auxv := (*[1 << 32]elf64Auxv)(add(unsafe.Pointer(argv), uintptr(n)*ptrSize))
+
+ for i := 0; auxv[i].a_type != _AT_NULL; i++ {
+ av := &auxv[i]
+ switch av.a_type {
+ case _AT_SYSINFO_EHDR:
+ if av.a_val == 0 {
+ // Something went wrong
+ continue
+ }
+ var info vdso_info
+ // TODO(rsc): I don't understand why the compiler thinks info escapes
+ // when passed to the three functions below.
+ info1 := (*vdso_info)(noescape(unsafe.Pointer(&info)))
+ vdso_init_from_sysinfo_ehdr(info1, (*elf64Ehdr)(unsafe.Pointer(uintptr(av.a_val))))
+ vdso_parse_symbols(info1, vdso_find_version(info1, &linux26))
+
+ case _AT_RANDOM:
+ startup_random_data = (*byte)(unsafe.Pointer(uintptr(av.a_val)))
+ startup_random_data_len = 16
+ }
+ }
+}
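
The sym_hash values in sym_keys (0xa33c485 and so on) are precomputed System V ELF hashes of the symbol names; that is the hash the DT_HASH bucket/chain table built in vdso_init_from_sysinfo_ehdr is indexed by. A self-contained sketch of the hash, which reproduces those constants:

    package main

    import "fmt"

    // elfHash is the classic System V ELF hash used by DT_HASH tables.
    func elfHash(name string) uint32 {
        var h uint32
        for i := 0; i < len(name); i++ {
            h = h<<4 + uint32(name[i])
            if g := h & 0xf0000000; g != 0 {
                h ^= g >> 24
                h &^= g
            }
        }
        return h
    }

    func main() {
        for _, s := range []string{"__vdso_time", "__vdso_gettimeofday", "__vdso_clock_gettime"} {
            fmt.Printf("%-22s %#x\n", s, elfHash(s)) // e.g. __vdso_time -> 0xa33c485
        }
    }
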
diff --git a/src/runtime/vdso_none.go b/src/runtime/vdso_none.go
new file mode 100644
index 000000000..6f83ecc89
--- /dev/null
+++ b/src/runtime/vdso_none.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux !amd64
+// +build !linux !386
+
+package runtime
+
+func sysargs(argc int32, argv **byte) {
+}
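
In the old-style build constraints above, space-separated terms on one +build line are ORed and separate lines are ANDed, so this stub is compiled for every GOOS/GOARCH pair except linux/amd64 and linux/386. A tiny illustration of that evaluation; usesStub is just an illustrative name:

    package main

    import "fmt"

    // Each +build line ORs its space-separated terms; separate lines are ANDed:
    //   (!linux || !amd64) && (!linux || !386)
    func usesStub(goos, goarch string) bool {
        return (goos != "linux" || goarch != "amd64") && (goos != "linux" || goarch != "386")
    }

    func main() {
        for _, p := range [][2]string{{"linux", "amd64"}, {"linux", "386"}, {"linux", "arm"}, {"darwin", "amd64"}} {
            fmt.Printf("%s/%s -> %v\n", p[0], p[1], usesStub(p[0], p[1]))
        }
    }
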
diff --git a/src/runtime/vlop_arm.s b/src/runtime/vlop_arm.s
index b4b905bb7..5354bf911 100644
--- a/src/runtime/vlop_arm.s
+++ b/src/runtime/vlop_arm.s
@@ -23,7 +23,8 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-#include "zasm_GOOS_GOARCH.h"
+#include "go_asm.h"
+#include "go_tls.h"
#include "textflag.h"
arg=0
@@ -100,7 +101,7 @@ TEXT _sfloat(SB), NOSPLIT, $68-0 // 4 arg + 14*4 saved regs + cpsr + return value
// load the signal fault address into LR, and jump
// to the real sigpanic.
// This simulates what sighandler does for a memory fault.
-TEXT _sfloatpanic(SB),NOSPLIT,$-4
+TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
MOVW $0, R0
MOVW.W R0, -4(R13)
MOVW g_sigpc(g), LR
diff --git a/src/runtime/vlrt.c b/src/runtime/vlrt.c
deleted file mode 100644
index cb0d14796..000000000
--- a/src/runtime/vlrt.c
+++ /dev/null
@@ -1,914 +0,0 @@
-// Inferno's libkern/vlrt-386.c
-// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-386.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
-// Portions Copyright 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build arm 386
-
-#include "textflag.h"
-
-/*
- * C runtime for 64-bit divide, others.
- *
- * TODO(rsc): The simple functions are dregs--8c knows how
- * to generate the code directly now. Find and remove.
- */
-
-void runtime·panicdivide(void);
-
-typedef unsigned long ulong;
-typedef unsigned int uint;
-typedef unsigned short ushort;
-typedef unsigned char uchar;
-typedef signed char schar;
-
-#define SIGN(n) (1UL<<(n-1))
-
-typedef struct Vlong Vlong;
-struct Vlong
-{
- ulong lo;
- ulong hi;
-};
-
-typedef union Vlong64 Vlong64;
-union Vlong64
-{
- long long v;
- Vlong v2;
-};
-
-void runtime·abort(void);
-
-#pragma textflag NOSPLIT
-Vlong
-_addv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.lo = a.lo + b.lo;
- r.hi = a.hi + b.hi;
- if(r.lo < a.lo)
- r.hi++;
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_subv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.lo = a.lo - b.lo;
- r.hi = a.hi - b.hi;
- if(r.lo > a.lo)
- r.hi--;
- return r;
-}
-
-Vlong
-_d2v(double d)
-{
- union { double d; Vlong vl; } x;
- ulong xhi, xlo, ylo, yhi;
- int sh;
- Vlong y;
-
- x.d = d;
-
- xhi = (x.vl.hi & 0xfffff) | 0x100000;
- xlo = x.vl.lo;
- sh = 1075 - ((x.vl.hi >> 20) & 0x7ff);
-
- ylo = 0;
- yhi = 0;
- if(sh >= 0) {
- /* v = (hi||lo) >> sh */
- if(sh < 32) {
- if(sh == 0) {
- ylo = xlo;
- yhi = xhi;
- } else {
- ylo = (xlo >> sh) | (xhi << (32-sh));
- yhi = xhi >> sh;
- }
- } else {
- if(sh == 32) {
- ylo = xhi;
- } else
- if(sh < 64) {
- ylo = xhi >> (sh-32);
- }
- }
- } else {
- /* v = (hi||lo) << -sh */
- sh = -sh;
- if(sh <= 10) { /* NOTE: sh <= 11 on ARM??? */
- ylo = xlo << sh;
- yhi = (xhi << sh) | (xlo >> (32-sh));
- } else {
- /* overflow */
- yhi = d; /* causes something awful */
- }
- }
- if(x.vl.hi & SIGN(32)) {
- if(ylo != 0) {
- ylo = -ylo;
- yhi = ~yhi;
- } else
- yhi = -yhi;
- }
-
- y.hi = yhi;
- y.lo = ylo;
- return y;
-}
-
-Vlong
-_f2v(float f)
-{
- return _d2v(f);
-}
-
-double
-_ul2d(ulong u)
-{
- // compensate for bug in c
- if(u & SIGN(32)) {
- u ^= SIGN(32);
- return 2147483648. + u;
- }
- return u;
-}
-
-double
-_v2d(Vlong x)
-{
- if(x.hi & SIGN(32)) {
- if(x.lo) {
- x.lo = -x.lo;
- x.hi = ~x.hi;
- } else
- x.hi = -x.hi;
- return -(_ul2d(x.hi)*4294967296. + _ul2d(x.lo));
- }
- return (long)x.hi*4294967296. + x.lo;
-}
-
-float
-_v2f(Vlong x)
-{
- return _v2d(x);
-}
-
-ulong runtime·_div64by32(Vlong, ulong, ulong*);
-int runtime·_mul64by32(Vlong*, Vlong, ulong);
-
-static void
-slowdodiv(Vlong num, Vlong den, Vlong *q, Vlong *r)
-{
- ulong numlo, numhi, denhi, denlo, quohi, quolo, t;
- int i;
-
- numhi = num.hi;
- numlo = num.lo;
- denhi = den.hi;
- denlo = den.lo;
-
- /*
- * get a divide by zero
- */
- if(denlo==0 && denhi==0) {
- runtime·panicdivide();
- }
-
- /*
- * set up the divisor and find the number of iterations needed
- */
- if(numhi >= SIGN(32)) {
- quohi = SIGN(32);
- quolo = 0;
- } else {
- quohi = numhi;
- quolo = numlo;
- }
- i = 0;
- while(denhi < quohi || (denhi == quohi && denlo < quolo)) {
- denhi = (denhi<<1) | (denlo>>31);
- denlo <<= 1;
- i++;
- }
-
- quohi = 0;
- quolo = 0;
- for(; i >= 0; i--) {
- quohi = (quohi<<1) | (quolo>>31);
- quolo <<= 1;
- if(numhi > denhi || (numhi == denhi && numlo >= denlo)) {
- t = numlo;
- numlo -= denlo;
- if(numlo > t)
- numhi--;
- numhi -= denhi;
- quolo |= 1;
- }
- denlo = (denlo>>1) | (denhi<<31);
- denhi >>= 1;
- }
-
- if(q) {
- q->lo = quolo;
- q->hi = quohi;
- }
- if(r) {
- r->lo = numlo;
- r->hi = numhi;
- }
-}
-
-#ifdef GOARCH_arm
-static void
-dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
-{
- slowdodiv(num, den, qp, rp);
-}
-#endif
-
-#ifdef GOARCH_386
-static void
-dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
-{
- ulong n;
- Vlong x, q, r;
-
- if(den.hi > num.hi || (den.hi == num.hi && den.lo > num.lo)){
- if(qp) {
- qp->hi = 0;
- qp->lo = 0;
- }
- if(rp) {
- rp->hi = num.hi;
- rp->lo = num.lo;
- }
- return;
- }
-
- if(den.hi != 0){
- q.hi = 0;
- n = num.hi/den.hi;
- if(runtime·_mul64by32(&x, den, n) || x.hi > num.hi || (x.hi == num.hi && x.lo > num.lo))
- slowdodiv(num, den, &q, &r);
- else {
- q.lo = n;
- *(long long*)&r = *(long long*)&num - *(long long*)&x;
- }
- } else {
- if(num.hi >= den.lo){
- if(den.lo == 0)
- runtime·panicdivide();
- q.hi = n = num.hi/den.lo;
- num.hi -= den.lo*n;
- } else {
- q.hi = 0;
- }
- q.lo = runtime·_div64by32(num, den.lo, &r.lo);
- r.hi = 0;
- }
- if(qp) {
- qp->lo = q.lo;
- qp->hi = q.hi;
- }
- if(rp) {
- rp->lo = r.lo;
- rp->hi = r.hi;
- }
-}
-#endif
-
-Vlong
-_divvu(Vlong n, Vlong d)
-{
- Vlong q;
-
- if(n.hi == 0 && d.hi == 0) {
- if(d.lo == 0)
- runtime·panicdivide();
- q.hi = 0;
- q.lo = n.lo / d.lo;
- return q;
- }
- dodiv(n, d, &q, 0);
- return q;
-}
-
-Vlong
-_modvu(Vlong n, Vlong d)
-{
- Vlong r;
-
- if(n.hi == 0 && d.hi == 0) {
- if(d.lo == 0)
- runtime·panicdivide();
- r.hi = 0;
- r.lo = n.lo % d.lo;
- return r;
- }
- dodiv(n, d, 0, &r);
- return r;
-}
-
-static void
-vneg(Vlong *v)
-{
-
- if(v->lo == 0) {
- v->hi = -v->hi;
- return;
- }
- v->lo = -v->lo;
- v->hi = ~v->hi;
-}
-
-Vlong
-_divv(Vlong n, Vlong d)
-{
- long nneg, dneg;
- Vlong q;
-
- if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
- if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
- // special case: 32-bit -0x80000000 / -1 causes divide error,
- // but it's okay in this 64-bit context.
- q.lo = 0x80000000;
- q.hi = 0;
- return q;
- }
- if(d.lo == 0)
- runtime·panicdivide();
- q.lo = (long)n.lo / (long)d.lo;
- q.hi = ((long)q.lo) >> 31;
- return q;
- }
- nneg = n.hi >> 31;
- if(nneg)
- vneg(&n);
- dneg = d.hi >> 31;
- if(dneg)
- vneg(&d);
- dodiv(n, d, &q, 0);
- if(nneg != dneg)
- vneg(&q);
- return q;
-}
-
-Vlong
-_modv(Vlong n, Vlong d)
-{
- long nneg, dneg;
- Vlong r;
-
- if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
- if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
- // special case: 32-bit -0x80000000 % -1 causes divide error,
- // but it's okay in this 64-bit context.
- r.lo = 0;
- r.hi = 0;
- return r;
- }
- if(d.lo == 0)
- runtime·panicdivide();
- r.lo = (long)n.lo % (long)d.lo;
- r.hi = ((long)r.lo) >> 31;
- return r;
- }
- nneg = n.hi >> 31;
- if(nneg)
- vneg(&n);
- dneg = d.hi >> 31;
- if(dneg)
- vneg(&d);
- dodiv(n, d, 0, &r);
- if(nneg)
- vneg(&r);
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_rshav(Vlong a, int b)
-{
- long t;
- Vlong r;
-
- t = a.hi;
- if(b >= 32) {
- r.hi = t>>31;
- if(b >= 64) {
- /* this is illegal re C standard */
- r.lo = t>>31;
- return r;
- }
- r.lo = t >> (b-32);
- return r;
- }
- if(b <= 0) {
- r.hi = t;
- r.lo = a.lo;
- return r;
- }
- r.hi = t >> b;
- r.lo = (t << (32-b)) | (a.lo >> b);
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_rshlv(Vlong a, int b)
-{
- ulong t;
- Vlong r;
-
- t = a.hi;
- if(b >= 32) {
- r.hi = 0;
- if(b >= 64) {
- /* this is illegal re C standard */
- r.lo = 0;
- return r;
- }
- r.lo = t >> (b-32);
- return r;
- }
- if(b <= 0) {
- r.hi = t;
- r.lo = a.lo;
- return r;
- }
- r.hi = t >> b;
- r.lo = (t << (32-b)) | (a.lo >> b);
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_lshv(Vlong a, int b)
-{
- ulong t;
-
- t = a.lo;
- if(b >= 32) {
- if(b >= 64) {
- /* this is illegal re C standard */
- return (Vlong){0, 0};
- }
- return (Vlong){0, t<<(b-32)};
- }
- if(b <= 0) {
- return (Vlong){t, a.hi};
- }
- return (Vlong){t<<b, (t >> (32-b)) | (a.hi << b)};
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_andv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.hi = a.hi & b.hi;
- r.lo = a.lo & b.lo;
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_orv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.hi = a.hi | b.hi;
- r.lo = a.lo | b.lo;
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_xorv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.hi = a.hi ^ b.hi;
- r.lo = a.lo ^ b.lo;
- return r;
-}
-
-Vlong
-_vpp(Vlong *r)
-{
- Vlong l;
-
- l = *r;
- r->lo++;
- if(r->lo == 0)
- r->hi++;
- return l;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_vmm(Vlong *r)
-{
- Vlong l;
-
- l = *r;
- if(r->lo == 0)
- r->hi--;
- r->lo--;
- return l;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_ppv(Vlong *r)
-{
-
- r->lo++;
- if(r->lo == 0)
- r->hi++;
- return *r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_mmv(Vlong *r)
-{
-
- if(r->lo == 0)
- r->hi--;
- r->lo--;
- return *r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_vasop(void *lv, Vlong fn(Vlong, Vlong), int type, Vlong rv)
-{
- Vlong t, u;
-
- u.lo = 0;
- u.hi = 0;
- switch(type) {
- default:
- runtime·abort();
- break;
-
- case 1: /* schar */
- t.lo = *(schar*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(schar*)lv = u.lo;
- break;
-
- case 2: /* uchar */
- t.lo = *(uchar*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(uchar*)lv = u.lo;
- break;
-
- case 3: /* short */
- t.lo = *(short*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(short*)lv = u.lo;
- break;
-
- case 4: /* ushort */
- t.lo = *(ushort*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(ushort*)lv = u.lo;
- break;
-
- case 9: /* int */
- t.lo = *(int*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(int*)lv = u.lo;
- break;
-
- case 10: /* uint */
- t.lo = *(uint*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(uint*)lv = u.lo;
- break;
-
- case 5: /* long */
- t.lo = *(long*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(long*)lv = u.lo;
- break;
-
- case 6: /* ulong */
- t.lo = *(ulong*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(ulong*)lv = u.lo;
- break;
-
- case 7: /* vlong */
- case 8: /* uvlong */
- if((void*)fn == _lshv || (void*)fn == _rshav || (void*)fn == _rshlv)
- u = ((Vlong(*)(Vlong,int))fn)(*(Vlong*)lv, *(int*)&rv);
- else
- u = fn(*(Vlong*)lv, rv);
- *(Vlong*)lv = u;
- break;
- }
- return u;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_p2v(void *p)
-{
- long t;
- Vlong ret;
-
- t = (ulong)p;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_sl2v(long sl)
-{
- long t;
- Vlong ret;
-
- t = sl;
- ret.lo = t;
- ret.hi = t >> 31;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_ul2v(ulong ul)
-{
- long t;
- Vlong ret;
-
- t = ul;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_si2v(int si)
-{
- return (Vlong){si, si>>31};
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_ui2v(uint ui)
-{
- long t;
- Vlong ret;
-
- t = ui;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_sh2v(long sh)
-{
- long t;
- Vlong ret;
-
- t = (sh << 16) >> 16;
- ret.lo = t;
- ret.hi = t >> 31;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_uh2v(ulong ul)
-{
- long t;
- Vlong ret;
-
- t = ul & 0xffff;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_sc2v(long uc)
-{
- long t;
- Vlong ret;
-
- t = (uc << 24) >> 24;
- ret.lo = t;
- ret.hi = t >> 31;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_uc2v(ulong ul)
-{
- long t;
- Vlong ret;
-
- t = ul & 0xff;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2sc(Vlong rv)
-{
- long t;
-
- t = rv.lo & 0xff;
- return (t << 24) >> 24;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2uc(Vlong rv)
-{
-
- return rv.lo & 0xff;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2sh(Vlong rv)
-{
- long t;
-
- t = rv.lo & 0xffff;
- return (t << 16) >> 16;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2uh(Vlong rv)
-{
-
- return rv.lo & 0xffff;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2sl(Vlong rv)
-{
-
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2ul(Vlong rv)
-{
-
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2si(Vlong rv)
-{
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2ui(Vlong rv)
-{
-
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-int
-_testv(Vlong rv)
-{
- return rv.lo || rv.hi;
-}
-
-#pragma textflag NOSPLIT
-int
-_eqv(Vlong lv, Vlong rv)
-{
- return lv.lo == rv.lo && lv.hi == rv.hi;
-}
-
-#pragma textflag NOSPLIT
-int
-_nev(Vlong lv, Vlong rv)
-{
- return lv.lo != rv.lo || lv.hi != rv.hi;
-}
-
-#pragma textflag NOSPLIT
-int
-_ltv(Vlong lv, Vlong rv)
-{
- return (long)lv.hi < (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo < rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_lev(Vlong lv, Vlong rv)
-{
- return (long)lv.hi < (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo <= rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_gtv(Vlong lv, Vlong rv)
-{
- return (long)lv.hi > (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo > rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_gev(Vlong lv, Vlong rv)
-{
- return (long)lv.hi > (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo >= rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_lov(Vlong lv, Vlong rv)
-{
- return lv.hi < rv.hi ||
- (lv.hi == rv.hi && lv.lo < rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_lsv(Vlong lv, Vlong rv)
-{
- return lv.hi < rv.hi ||
- (lv.hi == rv.hi && lv.lo <= rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_hiv(Vlong lv, Vlong rv)
-{
- return lv.hi > rv.hi ||
- (lv.hi == rv.hi && lv.lo > rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_hsv(Vlong lv, Vlong rv)
-{
- return lv.hi > rv.hi ||
- (lv.hi == rv.hi && lv.lo >= rv.lo);
-}