author      Ian Lance Taylor <iant@golang.org>    2016-09-21 20:58:51 +0000
committer   Ian Lance Taylor <ian@gcc.gnu.org>    2016-09-21 20:58:51 +0000
commit      4a2bb7fcb0c8507b958afb3d22ddfeeba494148a (patch)
tree        843fadb26050988a8c6037662e2d090533437044 /libgo
parent      812b1403a88cea3257e120f3234576f236c0921d (diff)
download    gcc-4a2bb7fcb0c8507b958afb3d22ddfeeba494148a.tar.gz
compiler, runtime: replace hashmap code with Go 1.7 hashmap
This change removes the gccgo-specific hashmap code and replaces it with the hashmap code from the Go 1.7 runtime. The Go 1.7 hashmap code is more efficient, does a better job on details like when to update a key, and provides some support against denial-of-service attacks.

The compiler is changed to call the new hashmap functions instead of the old ones. The compiler now tracks which types are reflexive and which require updating when used as a map key, and records the information in map type descriptors.

Map_index_expression is simplified. The special case for a map index on the right hand side of a tuple expression has been unnecessary for some time, and is removed. The support for specially marking a map index as an lvalue is removed, in favor of lowering an assignment to a map index into a function call. The long-obsolete support for a map index of a pointer to a map is removed. The __go_new_map_big function (known to the compiler as Runtime::MAKEMAPBIG) is no longer needed, as the new runtime.makemap function takes an int64 hint argument.

The old map descriptor type and supporting expression are removed.

The compiler was still supporting the long-obsolete syntax `m[k] = 0, false` to delete a value from a map. That is now removed, requiring a change to one of the gccgo-specific tests.

The builtin len function applied to a map or channel p is now compiled as `p == nil ? 0 : *(*int)(p)`. The __go_chan_len function (known to the compiler as Runtime::CHAN_LEN) is removed.

Support for a shared zero value for maps to large value types is introduced, along the lines of the gc compiler. The zero value is handled as a common variable.

The hash function is changed to take a seed argument, changing the runtime hash functions and the compiler-generated hash functions. Unlike the gc compiler, both the hash and equal functions continue to take the type length.

Types that cannot be compared now store nil for the hash and equal functions, rather than pointing to functions that throw. Interface hash and comparison functions now check explicitly for nil. This matches the gc compiler and permits a simple implementation for ismapkey.

The compiler is changed to permit marking struct and array types as incomparable, meaning that they have no hash or equal function. We use this for thunk types, removing the existing special code to avoid generating hash/equal functions for them.

The C runtime code adds memclr, memequal, and memmove functions. The hashmap code uses go:linkname comments to make the functions visible, as otherwise the compiler would discard them.

The hashmap code comments out the unused reference to the address of the first parameter in the race code, as otherwise the compiler thinks that the parameter escapes and copies it onto the heap. This is probably not needed when we enable escape analysis.

Several runtime map tests that were previously skipped for gccgo are now run.

The Go runtime picks up type kind information and stubs. The type kind information causes the generated runtime header file to define some constants, including `empty`, and the C code is adjusted accordingly.

A Go-callable version of runtime.throw, that takes a Go string, is added to be called from the hashmap code.

Reviewed-on: https://go-review.googlesource.com/29447

	* go.go-torture/execute/map-1.go: Replace old map deletion syntax
	with call to builtin delete function.

From-SVN: r240334
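As a concrete illustration of two of the user-visible points above, here is a minimal, self-contained Go sketch (an assumed example of the same kind of rewrite, not the actual contents of go.go-torture/execute/map-1.go) showing the obsolete deletion syntax replaced by the builtin delete, and the nil-map behaviour that the new len lowering preserves:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}

        // The pre-Go 1 deletion syntax is no longer accepted:
        //     m["a"] = 0, false
        // It is replaced by the builtin delete function:
        delete(m, "a")

        // len of a nil map (or channel) is 0; the compiler now emits
        // the equivalent of `m == nil ? 0 : *(*int)(m)` for this.
        var nilMap map[string]int
        fmt.Println(len(m), len(nilMap)) // prints: 1 0
    }

The seed argument added to hash functions is visible in the reflect/type.go hunk below: hashfn gains a seed parameter, and composite hashers such as those built by StructOf and ArrayOf fold each element's hash into the running seed instead of starting from zero.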
Diffstat (limited to 'libgo')
-rw-r--r--  libgo/Makefile.am | 11
-rw-r--r--  libgo/Makefile.in | 124
-rw-r--r--  libgo/go/reflect/type.go | 157
-rw-r--r--  libgo/go/runtime/export_test.go | 2
-rw-r--r--  libgo/go/runtime/hashmap.go | 1081
-rw-r--r--  libgo/go/runtime/hashmap_fast.go | 398
-rw-r--r--  libgo/go/runtime/map_test.go | 21
-rw-r--r--  libgo/go/runtime/msan0.go | 1
-rw-r--r--  libgo/go/runtime/race0.go | 40
-rw-r--r--  libgo/go/runtime/stubs.go | 253
-rw-r--r--  libgo/go/runtime/type.go | 29
-rw-r--r--  libgo/go/runtime/typekind.go | 44
-rw-r--r--  libgo/runtime/chan.goc | 6
-rw-r--r--  libgo/runtime/go-construct-map.c | 23
-rw-r--r--  libgo/runtime/go-eface-compare.c | 2
-rw-r--r--  libgo/runtime/go-eface-val-compare.c | 2
-rw-r--r--  libgo/runtime/go-fieldtrack.c | 23
-rw-r--r--  libgo/runtime/go-interface-compare.c | 2
-rw-r--r--  libgo/runtime/go-interface-eface-compare.c | 2
-rw-r--r--  libgo/runtime/go-interface-val-compare.c | 2
-rw-r--r--  libgo/runtime/go-map-delete.c | 61
-rw-r--r--  libgo/runtime/go-map-index.c | 137
-rw-r--r--  libgo/runtime/go-map-len.c | 25
-rw-r--r--  libgo/runtime/go-map-range.c | 103
-rw-r--r--  libgo/runtime/go-memclr.c | 16
-rw-r--r--  libgo/runtime/go-memequal.c | 16
-rw-r--r--  libgo/runtime/go-memmove.c | 16
-rw-r--r--  libgo/runtime/go-new-map.c | 142
-rw-r--r--  libgo/runtime/go-reflect-map.c | 156
-rw-r--r--  libgo/runtime/go-type-complex.c | 14
-rw-r--r--  libgo/runtime/go-type-eface.c | 10
-rw-r--r--  libgo/runtime/go-type-error.c | 34
-rw-r--r--  libgo/runtime/go-type-float.c | 10
-rw-r--r--  libgo/runtime/go-type-identity.c | 8
-rw-r--r--  libgo/runtime/go-type-interface.c | 10
-rw-r--r--  libgo/runtime/go-type-string.c | 4
-rw-r--r--  libgo/runtime/go-type.h | 48
-rw-r--r--  libgo/runtime/malloc.goc | 9
-rw-r--r--  libgo/runtime/malloc.h | 8
-rw-r--r--  libgo/runtime/map.goc | 72
-rw-r--r--  libgo/runtime/map.h | 87
-rw-r--r--  libgo/runtime/mcentral.c | 10
-rw-r--r--  libgo/runtime/mgc0.c | 31
-rw-r--r--  libgo/runtime/mheap.c | 4
-rw-r--r--  libgo/runtime/panic.c | 16
-rw-r--r--  libgo/runtime/proc.c | 14
-rw-r--r--  libgo/runtime/runtime.h | 2
47 files changed, 2171 insertions, 1115 deletions
diff --git a/libgo/Makefile.am b/libgo/Makefile.am
index 4ac6a4a7bd5..bd75dd3e02f 100644
--- a/libgo/Makefile.am
+++ b/libgo/Makefile.am
@@ -464,22 +464,19 @@ runtime_files = \
runtime/go-interface-eface-compare.c \
runtime/go-interface-val-compare.c \
runtime/go-make-slice.c \
- runtime/go-map-delete.c \
- runtime/go-map-index.c \
- runtime/go-map-len.c \
- runtime/go-map-range.c \
runtime/go-matherr.c \
+ runtime/go-memclr.c \
runtime/go-memcmp.c \
+ runtime/go-memequal.c \
+ runtime/go-memmove.c \
runtime/go-nanotime.c \
runtime/go-now.c \
- runtime/go-new-map.c \
runtime/go-new.c \
runtime/go-nosys.c \
runtime/go-panic.c \
runtime/go-print.c \
runtime/go-recover.c \
runtime/go-reflect-call.c \
- runtime/go-reflect-map.c \
runtime/go-rune.c \
runtime/go-runtime-error.c \
runtime/go-setenv.c \
@@ -492,7 +489,6 @@ runtime_files = \
runtime/go-traceback.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
- runtime/go-type-error.c \
runtime/go-type-float.c \
runtime/go-type-identity.c \
runtime/go-type-interface.c \
@@ -529,7 +525,6 @@ runtime_files = \
go-iface.c \
lfstack.c \
malloc.c \
- map.c \
mprof.c \
netpoll.c \
rdebug.c \
diff --git a/libgo/Makefile.in b/libgo/Makefile.in
index 2daa83ee443..78771c6f258 100644
--- a/libgo/Makefile.in
+++ b/libgo/Makefile.in
@@ -248,26 +248,24 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
go-eface-val-compare.lo go-ffi.lo go-fieldtrack.lo \
go-int-array-to-string.lo go-int-to-string.lo \
go-interface-compare.lo go-interface-eface-compare.lo \
- go-interface-val-compare.lo go-make-slice.lo go-map-delete.lo \
- go-map-index.lo go-map-len.lo go-map-range.lo go-matherr.lo \
- go-memcmp.lo go-nanotime.lo go-now.lo go-new-map.lo go-new.lo \
- go-nosys.lo go-panic.lo go-print.lo go-recover.lo \
- go-reflect-call.lo go-reflect-map.lo go-rune.lo \
+ go-interface-val-compare.lo go-make-slice.lo go-matherr.lo \
+ go-memclr.lo go-memcmp.lo go-memequal.lo go-memmove.lo \
+ go-nanotime.lo go-now.lo go-new.lo go-nosys.lo go-panic.lo \
+ go-print.lo go-recover.lo go-reflect-call.lo go-rune.lo \
go-runtime-error.lo go-setenv.lo go-signal.lo go-strcmp.lo \
go-string-to-byte-array.lo go-string-to-int-array.lo \
go-strplus.lo go-strslice.lo go-traceback.lo \
- go-type-complex.lo go-type-eface.lo go-type-error.lo \
- go-type-float.lo go-type-identity.lo go-type-interface.lo \
- go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \
- go-unsafe-newarray.lo go-unsafe-pointer.lo go-unsetenv.lo \
- go-unwind.lo go-varargs.lo env_posix.lo heapdump.lo \
- $(am__objects_1) mcache.lo mcentral.lo $(am__objects_2) \
- mfixalloc.lo mgc0.lo mheap.lo msize.lo $(am__objects_3) \
- panic.lo parfor.lo print.lo proc.lo runtime.lo signal_unix.lo \
- thread.lo yield.lo $(am__objects_4) chan.lo cpuprof.lo \
- go-iface.lo lfstack.lo malloc.lo map.lo mprof.lo netpoll.lo \
- rdebug.lo reflect.lo runtime1.lo sema.lo sigqueue.lo string.lo \
- time.lo $(am__objects_5)
+ go-type-complex.lo go-type-eface.lo go-type-float.lo \
+ go-type-identity.lo go-type-interface.lo go-type-string.lo \
+ go-typedesc-equal.lo go-unsafe-new.lo go-unsafe-newarray.lo \
+ go-unsafe-pointer.lo go-unsetenv.lo go-unwind.lo go-varargs.lo \
+ env_posix.lo heapdump.lo $(am__objects_1) mcache.lo \
+ mcentral.lo $(am__objects_2) mfixalloc.lo mgc0.lo mheap.lo \
+ msize.lo $(am__objects_3) panic.lo parfor.lo print.lo proc.lo \
+ runtime.lo signal_unix.lo thread.lo yield.lo $(am__objects_4) \
+ chan.lo cpuprof.lo go-iface.lo lfstack.lo malloc.lo mprof.lo \
+ netpoll.lo rdebug.lo reflect.lo runtime1.lo sema.lo \
+ sigqueue.lo string.lo time.lo $(am__objects_5)
am_libgo_llgo_la_OBJECTS = $(am__objects_6)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -867,22 +865,19 @@ runtime_files = \
runtime/go-interface-eface-compare.c \
runtime/go-interface-val-compare.c \
runtime/go-make-slice.c \
- runtime/go-map-delete.c \
- runtime/go-map-index.c \
- runtime/go-map-len.c \
- runtime/go-map-range.c \
runtime/go-matherr.c \
+ runtime/go-memclr.c \
runtime/go-memcmp.c \
+ runtime/go-memequal.c \
+ runtime/go-memmove.c \
runtime/go-nanotime.c \
runtime/go-now.c \
- runtime/go-new-map.c \
runtime/go-new.c \
runtime/go-nosys.c \
runtime/go-panic.c \
runtime/go-print.c \
runtime/go-recover.c \
runtime/go-reflect-call.c \
- runtime/go-reflect-map.c \
runtime/go-rune.c \
runtime/go-runtime-error.c \
runtime/go-setenv.c \
@@ -895,7 +890,6 @@ runtime_files = \
runtime/go-traceback.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
- runtime/go-type-error.c \
runtime/go-type-float.c \
runtime/go-type-identity.c \
runtime/go-type-interface.c \
@@ -932,7 +926,6 @@ runtime_files = \
go-iface.c \
lfstack.c \
malloc.c \
- map.c \
mprof.c \
netpoll.c \
rdebug.c \
@@ -1594,14 +1587,12 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-interface-eface-compare.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-interface-val-compare.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-make-slice.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-delete.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-index.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-len.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-range.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-matherr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memclr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memcmp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memequal.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memmove.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-nanotime.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-new-map.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-new.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-nosys.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-now.Plo@am__quote@
@@ -1609,7 +1600,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-print.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-recover.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-reflect-call.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-reflect-map.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-rune.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-runtime-error.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-setenv.Plo@am__quote@
@@ -1622,7 +1612,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-traceback.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-complex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-eface.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-error.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-float.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-identity.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-interface.Plo@am__quote@
@@ -1642,7 +1631,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lock_futex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lock_sema.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/malloc.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/map.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcache.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcentral.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mem.Plo@am__quote@
@@ -1920,34 +1908,6 @@ go-make-slice.lo: runtime/go-make-slice.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-make-slice.lo `test -f 'runtime/go-make-slice.c' || echo '$(srcdir)/'`runtime/go-make-slice.c
-go-map-delete.lo: runtime/go-map-delete.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-delete.lo -MD -MP -MF $(DEPDIR)/go-map-delete.Tpo -c -o go-map-delete.lo `test -f 'runtime/go-map-delete.c' || echo '$(srcdir)/'`runtime/go-map-delete.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-map-delete.Tpo $(DEPDIR)/go-map-delete.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-map-delete.c' object='go-map-delete.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-delete.lo `test -f 'runtime/go-map-delete.c' || echo '$(srcdir)/'`runtime/go-map-delete.c
-
-go-map-index.lo: runtime/go-map-index.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-index.lo -MD -MP -MF $(DEPDIR)/go-map-index.Tpo -c -o go-map-index.lo `test -f 'runtime/go-map-index.c' || echo '$(srcdir)/'`runtime/go-map-index.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-map-index.Tpo $(DEPDIR)/go-map-index.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-map-index.c' object='go-map-index.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-index.lo `test -f 'runtime/go-map-index.c' || echo '$(srcdir)/'`runtime/go-map-index.c
-
-go-map-len.lo: runtime/go-map-len.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-len.lo -MD -MP -MF $(DEPDIR)/go-map-len.Tpo -c -o go-map-len.lo `test -f 'runtime/go-map-len.c' || echo '$(srcdir)/'`runtime/go-map-len.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-map-len.Tpo $(DEPDIR)/go-map-len.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-map-len.c' object='go-map-len.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-len.lo `test -f 'runtime/go-map-len.c' || echo '$(srcdir)/'`runtime/go-map-len.c
-
-go-map-range.lo: runtime/go-map-range.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-range.lo -MD -MP -MF $(DEPDIR)/go-map-range.Tpo -c -o go-map-range.lo `test -f 'runtime/go-map-range.c' || echo '$(srcdir)/'`runtime/go-map-range.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-map-range.Tpo $(DEPDIR)/go-map-range.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-map-range.c' object='go-map-range.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-range.lo `test -f 'runtime/go-map-range.c' || echo '$(srcdir)/'`runtime/go-map-range.c
-
go-matherr.lo: runtime/go-matherr.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-matherr.lo -MD -MP -MF $(DEPDIR)/go-matherr.Tpo -c -o go-matherr.lo `test -f 'runtime/go-matherr.c' || echo '$(srcdir)/'`runtime/go-matherr.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-matherr.Tpo $(DEPDIR)/go-matherr.Plo
@@ -1955,6 +1915,13 @@ go-matherr.lo: runtime/go-matherr.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-matherr.lo `test -f 'runtime/go-matherr.c' || echo '$(srcdir)/'`runtime/go-matherr.c
+go-memclr.lo: runtime/go-memclr.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memclr.lo -MD -MP -MF $(DEPDIR)/go-memclr.Tpo -c -o go-memclr.lo `test -f 'runtime/go-memclr.c' || echo '$(srcdir)/'`runtime/go-memclr.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-memclr.Tpo $(DEPDIR)/go-memclr.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-memclr.c' object='go-memclr.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memclr.lo `test -f 'runtime/go-memclr.c' || echo '$(srcdir)/'`runtime/go-memclr.c
+
go-memcmp.lo: runtime/go-memcmp.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memcmp.lo -MD -MP -MF $(DEPDIR)/go-memcmp.Tpo -c -o go-memcmp.lo `test -f 'runtime/go-memcmp.c' || echo '$(srcdir)/'`runtime/go-memcmp.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-memcmp.Tpo $(DEPDIR)/go-memcmp.Plo
@@ -1962,6 +1929,20 @@ go-memcmp.lo: runtime/go-memcmp.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memcmp.lo `test -f 'runtime/go-memcmp.c' || echo '$(srcdir)/'`runtime/go-memcmp.c
+go-memequal.lo: runtime/go-memequal.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memequal.lo -MD -MP -MF $(DEPDIR)/go-memequal.Tpo -c -o go-memequal.lo `test -f 'runtime/go-memequal.c' || echo '$(srcdir)/'`runtime/go-memequal.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-memequal.Tpo $(DEPDIR)/go-memequal.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-memequal.c' object='go-memequal.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memequal.lo `test -f 'runtime/go-memequal.c' || echo '$(srcdir)/'`runtime/go-memequal.c
+
+go-memmove.lo: runtime/go-memmove.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memmove.lo -MD -MP -MF $(DEPDIR)/go-memmove.Tpo -c -o go-memmove.lo `test -f 'runtime/go-memmove.c' || echo '$(srcdir)/'`runtime/go-memmove.c
+@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-memmove.Tpo $(DEPDIR)/go-memmove.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-memmove.c' object='go-memmove.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memmove.lo `test -f 'runtime/go-memmove.c' || echo '$(srcdir)/'`runtime/go-memmove.c
+
go-nanotime.lo: runtime/go-nanotime.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-nanotime.lo -MD -MP -MF $(DEPDIR)/go-nanotime.Tpo -c -o go-nanotime.lo `test -f 'runtime/go-nanotime.c' || echo '$(srcdir)/'`runtime/go-nanotime.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-nanotime.Tpo $(DEPDIR)/go-nanotime.Plo
@@ -1976,13 +1957,6 @@ go-now.lo: runtime/go-now.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-now.lo `test -f 'runtime/go-now.c' || echo '$(srcdir)/'`runtime/go-now.c
-go-new-map.lo: runtime/go-new-map.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-new-map.lo -MD -MP -MF $(DEPDIR)/go-new-map.Tpo -c -o go-new-map.lo `test -f 'runtime/go-new-map.c' || echo '$(srcdir)/'`runtime/go-new-map.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-new-map.Tpo $(DEPDIR)/go-new-map.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-new-map.c' object='go-new-map.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-new-map.lo `test -f 'runtime/go-new-map.c' || echo '$(srcdir)/'`runtime/go-new-map.c
-
go-new.lo: runtime/go-new.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-new.lo -MD -MP -MF $(DEPDIR)/go-new.Tpo -c -o go-new.lo `test -f 'runtime/go-new.c' || echo '$(srcdir)/'`runtime/go-new.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-new.Tpo $(DEPDIR)/go-new.Plo
@@ -2025,13 +1999,6 @@ go-reflect-call.lo: runtime/go-reflect-call.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-reflect-call.lo `test -f 'runtime/go-reflect-call.c' || echo '$(srcdir)/'`runtime/go-reflect-call.c
-go-reflect-map.lo: runtime/go-reflect-map.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-reflect-map.lo -MD -MP -MF $(DEPDIR)/go-reflect-map.Tpo -c -o go-reflect-map.lo `test -f 'runtime/go-reflect-map.c' || echo '$(srcdir)/'`runtime/go-reflect-map.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-reflect-map.Tpo $(DEPDIR)/go-reflect-map.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-reflect-map.c' object='go-reflect-map.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-reflect-map.lo `test -f 'runtime/go-reflect-map.c' || echo '$(srcdir)/'`runtime/go-reflect-map.c
-
go-rune.lo: runtime/go-rune.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-rune.lo -MD -MP -MF $(DEPDIR)/go-rune.Tpo -c -o go-rune.lo `test -f 'runtime/go-rune.c' || echo '$(srcdir)/'`runtime/go-rune.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-rune.Tpo $(DEPDIR)/go-rune.Plo
@@ -2116,13 +2083,6 @@ go-type-eface.lo: runtime/go-type-eface.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-type-eface.lo `test -f 'runtime/go-type-eface.c' || echo '$(srcdir)/'`runtime/go-type-eface.c
-go-type-error.lo: runtime/go-type-error.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-type-error.lo -MD -MP -MF $(DEPDIR)/go-type-error.Tpo -c -o go-type-error.lo `test -f 'runtime/go-type-error.c' || echo '$(srcdir)/'`runtime/go-type-error.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-type-error.Tpo $(DEPDIR)/go-type-error.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-type-error.c' object='go-type-error.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-type-error.lo `test -f 'runtime/go-type-error.c' || echo '$(srcdir)/'`runtime/go-type-error.c
-
go-type-float.lo: runtime/go-type-float.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-type-float.lo -MD -MP -MF $(DEPDIR)/go-type-float.Tpo -c -o go-type-float.lo `test -f 'runtime/go-type-float.c' || echo '$(srcdir)/'`runtime/go-type-float.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-type-float.Tpo $(DEPDIR)/go-type-float.Plo
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index d89f15631ac..13b326f5a8d 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -16,7 +16,6 @@
package reflect
import (
- "runtime"
"strconv"
"sync"
"unsafe"
@@ -255,7 +254,7 @@ type rtype struct {
size uintptr
hash uint32 // hash of type; avoids computation in hash tables
- hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
+ hashfn func(unsafe.Pointer, uintptr, uintptr) uintptr // hash function
equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function
gc unsafe.Pointer // garbage collection data
@@ -330,9 +329,18 @@ type interfaceType struct {
// mapType represents a map type.
type mapType struct {
- rtype `reflect:"map"`
- key *rtype // map key type
- elem *rtype // map element (value) type
+ rtype `reflect:"map"`
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ hmap *rtype // internal map header
+ keysize uint8 // size of key slot
+ indirectkey uint8 // store ptr to key instead of key itself
+ valuesize uint8 // size of value slot
+ indirectvalue uint8 // store ptr to value instead of value itself
+ bucketsize uint16 // size of bucket
+ reflexivekey bool // true if k==k for all keys
+ needkeyupdate bool // true if we need to update key on an overwrite
}
// ptrType represents a pointer type.
@@ -1606,20 +1614,25 @@ func MapOf(key, elem Type) Type {
mt.elem = etyp
mt.uncommonType = nil
mt.ptrToThis = nil
- // mt.gc = unsafe.Pointer(&ptrGC{
- // width: unsafe.Sizeof(uintptr(0)),
- // op: _GC_PTR,
- // off: 0,
- // elemgc: nil,
- // end: _GC_END,
- // })
- // TODO(cmang): Generate GC data for Map elements.
- mt.gc = unsafe.Pointer(&ptrDataGCProg)
-
- // INCORRECT. Uncomment to check that TestMapOfGC and TestMapOfGCValues
- // fail when mt.gc is wrong.
- //mt.gc = unsafe.Pointer(&badGC{width: mt.size, end: _GC_END})
+ mt.bucket = bucketOf(ktyp, etyp)
+ if ktyp.size > maxKeySize {
+ mt.keysize = uint8(ptrSize)
+ mt.indirectkey = 1
+ } else {
+ mt.keysize = uint8(ktyp.size)
+ mt.indirectkey = 0
+ }
+ if etyp.size > maxValSize {
+ mt.valuesize = uint8(ptrSize)
+ mt.indirectvalue = 1
+ } else {
+ mt.valuesize = uint8(etyp.size)
+ mt.indirectvalue = 0
+ }
+ mt.bucketsize = uint16(mt.bucket.size)
+ mt.reflexivekey = isReflexive(ktyp)
+ mt.needkeyupdate = needKeyUpdate(ktyp)
return cachePut(ckey, &mt.rtype)
}
@@ -1824,72 +1837,60 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
// Note that since the key and value are known to be <= 128 bytes,
// they're guaranteed to have bitmaps instead of GC programs.
// var gcdata *byte
- var ptrdata uintptr
- var overflowPad uintptr
+ // var ptrdata uintptr
- // On NaCl, pad if needed to make overflow end at the proper struct alignment.
- // On other systems, align > ptrSize is not possible.
- if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
- overflowPad = ptrSize
+ size := bucketSize
+ size = align(size, uintptr(ktyp.fieldAlign))
+ size += bucketSize * ktyp.size
+ size = align(size, uintptr(etyp.fieldAlign))
+ size += bucketSize * etyp.size
+
+ maxAlign := uintptr(ktyp.fieldAlign)
+ if maxAlign < uintptr(etyp.fieldAlign) {
+ maxAlign = uintptr(etyp.fieldAlign)
}
- size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
- if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
- panic("reflect: bad size computation in MapOf")
+ if maxAlign > ptrSize {
+ size = align(size, maxAlign)
+ size += align(ptrSize, maxAlign) - ptrSize
}
- if kind != kindNoPointers {
- nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
- mask := make([]byte, (nptr+7)/8)
- base := bucketSize / ptrSize
+ ovoff := size
+ size += ptrSize
+ if maxAlign < ptrSize {
+ maxAlign = ptrSize
+ }
+ var gcPtr unsafe.Pointer
+ if kind != kindNoPointers {
+ gc := []uintptr{size}
+ base := bucketSize
+ base = align(base, uintptr(ktyp.fieldAlign))
if ktyp.kind&kindNoPointers == 0 {
- if ktyp.kind&kindGCProg != 0 {
- panic("reflect: unexpected GC program in MapOf")
- }
- kmask := (*[16]byte)(unsafe.Pointer( /*ktyp.gcdata*/ nil))
- for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
- if (kmask[i/8]>>(i%8))&1 != 0 {
- for j := uintptr(0); j < bucketSize; j++ {
- word := base + j*ktyp.size/ptrSize + i
- mask[word/8] |= 1 << (word % 8)
- }
- }
- }
+ gc = append(gc, _GC_ARRAY_START, base, bucketSize, ktyp.size)
+ gc = appendGCProgram(gc, ktyp, 0)
+ gc = append(gc, _GC_ARRAY_NEXT)
}
- base += bucketSize * ktyp.size / ptrSize
-
+ base += ktyp.size * bucketSize
+ base = align(base, uintptr(etyp.fieldAlign))
if etyp.kind&kindNoPointers == 0 {
- if etyp.kind&kindGCProg != 0 {
- panic("reflect: unexpected GC program in MapOf")
- }
- emask := (*[16]byte)(unsafe.Pointer( /*etyp.gcdata*/ nil))
- for i := uintptr(0); i < etyp.size/ptrSize; i++ {
- if (emask[i/8]>>(i%8))&1 != 0 {
- for j := uintptr(0); j < bucketSize; j++ {
- word := base + j*etyp.size/ptrSize + i
- mask[word/8] |= 1 << (word % 8)
- }
- }
- }
- }
- base += bucketSize * etyp.size / ptrSize
- base += overflowPad / ptrSize
-
- word := base
- mask[word/8] |= 1 << (word % 8)
- // gcdata = &mask[0]
- ptrdata = (word + 1) * ptrSize
-
- // overflow word must be last
- if ptrdata != size {
- panic("reflect: bad layout computation in MapOf")
+ gc = append(gc, _GC_ARRAY_START, base, bucketSize, etyp.size)
+ gc = appendGCProgram(gc, etyp, 0)
+ gc = append(gc, _GC_ARRAY_NEXT)
}
+ gc = append(gc, _GC_APTR, ovoff, _GC_END)
+ gcPtr = unsafe.Pointer(&gc[0])
+ } else {
+ // No pointers in bucket.
+ gc := [...]uintptr{size, _GC_END}
+ gcPtr = unsafe.Pointer(&gc[0])
}
b := new(rtype)
- // b.size = gc.size
- // b.gc[0], _ = gc.finalize()
- b.kind |= kindGCProg
+ b.align = int8(maxAlign)
+ b.fieldAlign = uint8(maxAlign)
+ b.size = size
+ b.kind = kind
+ b.gc = gcPtr
s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
b.string = &s
return b
@@ -2202,14 +2203,14 @@ func StructOf(fields []StructField) Type {
typ.gc = unsafe.Pointer(&gc[0])
}
- typ.hashfn = func(p unsafe.Pointer, size uintptr) uintptr {
- ret := uintptr(0)
+ typ.hashfn = func(p unsafe.Pointer, seed, size uintptr) uintptr {
+ ret := seed
for i, ft := range typ.fields {
if i > 0 {
ret *= 33
}
o := unsafe.Pointer(uintptr(p) + ft.offset)
- ret += ft.typ.hashfn(o, ft.typ.size)
+ ret = ft.typ.hashfn(o, ret, ft.typ.size)
}
return ret
}
@@ -2347,11 +2348,11 @@ func ArrayOf(count int, elem Type) Type {
array.kind &^= kindDirectIface
- array.hashfn = func(p unsafe.Pointer, size uintptr) uintptr {
- ret := uintptr(0)
+ array.hashfn = func(p unsafe.Pointer, seed, size uintptr) uintptr {
+ ret := seed
for i := 0; i < count; i++ {
ret *= 33
- ret += typ.hashfn(p, typ.size)
+ ret = typ.hashfn(p, ret, typ.size)
p = unsafe.Pointer(uintptr(p) + typ.size)
}
return ret
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 7ba217eb782..2b1a9b72211 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -90,7 +90,7 @@ func GCMask(x interface{}) (ret []byte) {
//var IfaceHash = ifaceHash
//var MemclrBytes = memclrBytes
-// var HashLoad = &hashLoad
+var HashLoad = &hashLoad
// entry point for testing
//func GostringW(w []uint16) (s string) {
diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go
new file mode 100644
index 00000000000..aaf4fb4d6e3
--- /dev/null
+++ b/libgo/go/runtime/hashmap.go
@@ -0,0 +1,1081 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// This file contains the implementation of Go's map type.
+//
+// A map is just a hash table. The data is arranged
+// into an array of buckets. Each bucket contains up to
+// 8 key/value pairs. The low-order bits of the hash are
+// used to select a bucket. Each bucket contains a few
+// high-order bits of each hash to distinguish the entries
+// within a single bucket.
+//
+// If more than 8 keys hash to a bucket, we chain on
+// extra buckets.
+//
+// When the hashtable grows, we allocate a new array
+// of buckets twice as big. Buckets are incrementally
+// copied from the old bucket array to the new bucket array.
+//
+// Map iterators walk through the array of buckets and
+// return the keys in walk order (bucket #, then overflow
+// chain order, then bucket index). To maintain iteration
+// semantics, we never move keys within their bucket (if
+// we did, keys might be returned 0 or 2 times). When
+// growing the table, iterators remain iterating through the
+// old table and must check the new table if the bucket
+// they are iterating through has been moved ("evacuated")
+// to the new table.
+
+// Picking loadFactor: too large and we have lots of overflow
+// buckets, too small and we waste a lot of space. I wrote
+// a simple program to check some stats for different loads:
+// (64-bit, 8 byte keys and values)
+// loadFactor %overflow bytes/entry hitprobe missprobe
+// 4.00 2.13 20.77 3.00 4.00
+// 4.50 4.05 17.30 3.25 4.50
+// 5.00 6.85 14.77 3.50 5.00
+// 5.50 10.55 12.94 3.75 5.50
+// 6.00 15.27 11.67 4.00 6.00
+// 6.50 20.90 10.79 4.25 6.50
+// 7.00 27.14 10.15 4.50 7.00
+// 7.50 34.03 9.73 4.75 7.50
+// 8.00 41.10 9.40 5.00 8.00
+//
+// %overflow = percentage of buckets which have an overflow bucket
+// bytes/entry = overhead bytes used per key/value pair
+// hitprobe = # of entries to check when looking up a present key
+// missprobe = # of entries to check when looking up an absent key
+//
+// Keep in mind this data is for maximally loaded tables, i.e. just
+// before the table grows. Typical tables will be somewhat less loaded.
+
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname makemap runtime.makemap
+//go:linkname mapaccess1 runtime.mapaccess1
+//go:linkname mapaccess2 runtime.mapaccess2
+//go:linkname mapaccess1_fat runtime.mapaccess1_fat
+//go:linkname mapaccess2_fat runtime.mapaccess2_fat
+//go:linkname mapassign1 runtime.mapassign1
+//go:linkname mapdelete runtime.mapdelete
+//go:linkname mapiterinit runtime.mapiterinit
+//go:linkname mapiternext runtime.mapiternext
+
+const (
+ // Maximum number of key/value pairs a bucket can hold.
+ bucketCntBits = 3
+ bucketCnt = 1 << bucketCntBits
+
+ // Maximum average load of a bucket that triggers growth.
+ loadFactor = 6.5
+
+ // Maximum key or value size to keep inline (instead of mallocing per element).
+ // Must fit in a uint8.
+ // Fast versions cannot handle big values - the cutoff size for
+ // fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
+ maxKeySize = 128
+ maxValueSize = 128
+
+ // data offset should be the size of the bmap struct, but needs to be
+ // aligned correctly. For amd64p32 this means 64-bit alignment
+ // even though pointers are 32 bit.
+ dataOffset = unsafe.Offsetof(struct {
+ b bmap
+ v int64
+ }{}.v)
+
+ // Possible tophash values. We reserve a few possibilities for special marks.
+ // Each bucket (including its overflow buckets, if any) will have either all or none of its
+ // entries in the evacuated* states (except during the evacuate() method, which only happens
+ // during map writes and thus no one else can observe the map during that time).
+ empty = 0 // cell is empty
+ evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
+ evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
+ evacuatedY = 3 // same as above, but evacuated to second half of larger table.
+ minTopHash = 4 // minimum tophash for a normal filled cell.
+
+ // flags
+ iterator = 1 // there may be an iterator using buckets
+ oldIterator = 2 // there may be an iterator using oldbuckets
+ hashWriting = 4 // a goroutine is writing to the map
+
+ // sentinel bucket ID for iterator checks
+ noCheck = 1<<(8*sys.PtrSize) - 1
+)
+
+// A header for a Go map.
+type hmap struct {
+ // Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
+ // ../reflect/type.go. Don't change this structure without also changing that code!
+ count int // # live cells == size of map. Must be first (used by len() builtin)
+ flags uint8
+ B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+ hash0 uint32 // hash seed
+
+ buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
+ oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
+ nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
+
+ // If both key and value do not contain pointers and are inline, then we mark bucket
+ // type as containing no pointers. This avoids scanning such maps.
+ // However, bmap.overflow is a pointer. In order to keep overflow buckets
+ // alive, we store pointers to all overflow buckets in hmap.overflow.
+ // Overflow is used only if key and value do not contain pointers.
+ // overflow[0] contains overflow buckets for hmap.buckets.
+ // overflow[1] contains overflow buckets for hmap.oldbuckets.
+ // The first indirection allows us to reduce static size of hmap.
+ // The second indirection allows to store a pointer to the slice in hiter.
+ overflow *[2]*[]*bmap
+}
+
+// A bucket for a Go map.
+type bmap struct {
+ tophash [bucketCnt]uint8
+ // Followed by bucketCnt keys and then bucketCnt values.
+ // NOTE: packing all the keys together and then all the values together makes the
+ // code a bit more complicated than alternating key/value/key/value/... but it allows
+ // us to eliminate padding which would be needed for, e.g., map[int64]int8.
+ // Followed by an overflow pointer.
+}
+
+// A hash iteration structure.
+// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
+// the layout of this structure.
+type hiter struct {
+ key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
+ value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
+ t *maptype
+ h *hmap
+ buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
+ bptr *bmap // current bucket
+ overflow [2]*[]*bmap // keeps overflow buckets alive
+ startBucket uintptr // bucket iteration started at
+ offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
+ wrapped bool // already wrapped around from end of bucket array to beginning
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
+}
+
+func evacuated(b *bmap) bool {
+ h := b.tophash[0]
+ return h > empty && h < minTopHash
+}
+
+func (b *bmap) overflow(t *maptype) *bmap {
+ return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
+}
+
+func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
+ if t.bucket.kind&kindNoPointers != 0 {
+ h.createOverflow()
+ *h.overflow[0] = append(*h.overflow[0], ovf)
+ }
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+}
+
+func (h *hmap) createOverflow() {
+ if h.overflow == nil {
+ h.overflow = new([2]*[]*bmap)
+ }
+ if h.overflow[0] == nil {
+ h.overflow[0] = new([]*bmap)
+ }
+}
+
+// makemap implements a Go map creation make(map[k]v, hint)
+// If the compiler has determined that the map or the first bucket
+// can be created on the stack, h and/or bucket may be non-nil.
+// If h != nil, the map can be created directly in h.
+// If bucket != nil, bucket can be used as the first bucket.
+func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
+ if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
+ println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
+ throw("bad hmap size")
+ }
+
+ if hint < 0 || int64(int32(hint)) != hint {
+ panic(plainError("makemap: size out of range"))
+ // TODO: make hint an int, then none of this nonsense
+ }
+
+ if !ismapkey(t.key) {
+ throw("runtime.makemap: unsupported map key type")
+ }
+
+ // check compiler's and reflect's math
+ if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
+ t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
+ throw("key size wrong")
+ }
+ if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
+ t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
+ throw("value size wrong")
+ }
+
+ // invariants we depend on. We should probably check these at compile time
+ // somewhere, but for now we'll do it here.
+ if t.key.align > bucketCnt {
+ throw("key align too big")
+ }
+ if t.elem.align > bucketCnt {
+ throw("value align too big")
+ }
+ if t.key.size%uintptr(t.key.align) != 0 {
+ throw("key size not a multiple of key align")
+ }
+ if t.elem.size%uintptr(t.elem.align) != 0 {
+ throw("value size not a multiple of value align")
+ }
+ if bucketCnt < 8 {
+ throw("bucketsize too small for proper alignment")
+ }
+ if dataOffset%uintptr(t.key.align) != 0 {
+ throw("need padding in bucket (key)")
+ }
+ if dataOffset%uintptr(t.elem.align) != 0 {
+ throw("need padding in bucket (value)")
+ }
+
+ // find size parameter which will hold the requested # of elements
+ B := uint8(0)
+ for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
+ }
+
+ // allocate initial hash table
+ // if B == 0, the buckets field is allocated lazily later (in mapassign)
+ // If hint is large zeroing this memory could take a while.
+ buckets := bucket
+ if B != 0 {
+ buckets = newarray(t.bucket, 1<<B)
+ }
+
+ // initialize Hmap
+ if h == nil {
+ h = (*hmap)(newobject(t.hmap))
+ }
+ h.count = 0
+ h.B = B
+ h.flags = 0
+ h.hash0 = fastrand1()
+ h.buckets = buckets
+ h.oldbuckets = nil
+ h.nevacuate = 0
+
+ return h
+}
+
+// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
+// it will return a reference to the zero object for the value type if
+// the key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ pc := funcPC(mapaccess1)
+ racereadpc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+ hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if equalfn(key, k, uintptr(t.keysize)) {
+ v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+ if t.indirectvalue {
+ v = *((*unsafe.Pointer)(v))
+ }
+ return v
+ }
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ }
+}
+
+func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ pc := funcPC(mapaccess2)
+ racereadpc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+ hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if equalfn(key, k, uintptr(t.keysize)) {
+ v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+ if t.indirectvalue {
+ v = *((*unsafe.Pointer)(v))
+ }
+ return v, true
+ }
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ }
+}
+
+// returns both key and value. Used by map iterator
+func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
+ if h == nil || h.count == 0 {
+ return nil, nil
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+ hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ if t.indirectkey {
+ k = *((*unsafe.Pointer)(k))
+ }
+ if equalfn(key, k, uintptr(t.keysize)) {
+ v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+ if t.indirectvalue {
+ v = *((*unsafe.Pointer)(v))
+ }
+ return k, v
+ }
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return nil, nil
+ }
+ }
+}
+
+func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
+ v := mapaccess1(t, h, key)
+ if v == unsafe.Pointer(&zeroVal[0]) {
+ return zero
+ }
+ return v
+}
+
+func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+ v := mapaccess1(t, h, key)
+ if v == unsafe.Pointer(&zeroVal[0]) {
+ return zero, false
+ }
+ return v, true
+}
+
+func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ pc := funcPC(mapassign1)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ raceReadObjectPC(t.elem, val, callerpc, pc)
+ }
+ if msanenabled {
+ msanread(key, t.key.size)
+ msanread(val, t.elem.size)
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ h.flags |= hashWriting
+
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+ hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+
+ if h.buckets == nil {
+ h.buckets = newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.oldbuckets != nil {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+
+ var inserti *uint8
+ var insertk unsafe.Pointer
+ var insertv unsafe.Pointer
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == empty && inserti == nil {
+ inserti = &b.tophash[i]
+ insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+ }
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ if !equalfn(key, k2, uintptr(t.keysize)) {
+ continue
+ }
+ // already have a mapping for key. Update it.
+ if t.needkeyupdate {
+ typedmemmove(t.key, k2, key)
+ }
+ v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+ v2 := v
+ if t.indirectvalue {
+ v2 = *((*unsafe.Pointer)(v2))
+ }
+ typedmemmove(t.elem, v2, val)
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // did not find mapping for key. Allocate new cell & add entry.
+ if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if inserti == nil {
+ // all current buckets are full, allocate a new one.
+ newb := (*bmap)(newobject(t.bucket))
+ h.setoverflow(t, b, newb)
+ inserti = &newb.tophash[0]
+ insertk = add(unsafe.Pointer(newb), dataOffset)
+ insertv = add(insertk, bucketCnt*uintptr(t.keysize))
+ }
+
+ // store new key/value at insert position
+ if t.indirectkey {
+ kmem := newobject(t.key)
+ *(*unsafe.Pointer)(insertk) = kmem
+ insertk = kmem
+ }
+ if t.indirectvalue {
+ vmem := newobject(t.elem)
+ *(*unsafe.Pointer)(insertv) = vmem
+ insertv = vmem
+ }
+ typedmemmove(t.key, insertk, key)
+ typedmemmove(t.elem, insertv, val)
+ *inserti = top
+ h.count++
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ pc := funcPC(mapdelete)
+ racewritepc(unsafe.Pointer(h), callerpc, pc)
+ raceReadObjectPC(t.key, key, callerpc, pc)
+ }
+ if msanenabled && h != nil {
+ msanread(key, t.key.size)
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ h.flags |= hashWriting
+
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+ hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.oldbuckets != nil {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ if !equalfn(key, k2, uintptr(t.keysize)) {
+ continue
+ }
+ memclr(k, uintptr(t.keysize))
+ v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
+ memclr(v, uintptr(t.valuesize))
+ b.tophash[i] = empty
+ h.count--
+ goto done
+ }
+ b = b.overflow(t)
+ if b == nil {
+ goto done
+ }
+ }
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func mapiterinit(t *maptype, h *hmap, it *hiter) {
+ // Clear pointer fields so garbage collector does not complain.
+ it.key = nil
+ it.value = nil
+ it.t = nil
+ it.h = nil
+ it.buckets = nil
+ it.bptr = nil
+ it.overflow[0] = nil
+ it.overflow[1] = nil
+
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
+ }
+
+ if h == nil || h.count == 0 {
+ it.key = nil
+ it.value = nil
+ return
+ }
+
+ if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
+ throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
+ }
+ it.t = t
+ it.h = h
+
+ // grab snapshot of bucket state
+ it.B = h.B
+ it.buckets = h.buckets
+ if t.bucket.kind&kindNoPointers != 0 {
+ // Allocate the current slice and remember pointers to both current and old.
+ // This preserves all relevant overflow buckets alive even if
+ // the table grows and/or overflow buckets are added to the table
+ // while we are iterating.
+ h.createOverflow()
+ it.overflow = *h.overflow
+ }
+
+ // decide where to start
+ r := uintptr(fastrand1())
+ if h.B > 31-bucketCntBits {
+ r += uintptr(fastrand1()) << 31
+ }
+ it.startBucket = r & (uintptr(1)<<h.B - 1)
+ it.offset = uint8(r >> h.B & (bucketCnt - 1))
+
+ // iterator state
+ it.bucket = it.startBucket
+ it.wrapped = false
+ it.bptr = nil
+
+ // Remember we have an iterator.
+	// Can run concurrently with another mapiterinit().
+ if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
+ atomic.Or8(&h.flags, iterator|oldIterator)
+ }
+
+ mapiternext(it)
+}
+
+func mapiternext(it *hiter) {
+ h := it.h
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer( /* &it */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
+ }
+ t := it.t
+ bucket := it.bucket
+ b := it.bptr
+ i := it.i
+ checkBucket := it.checkBucket
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+
+next:
+ if b == nil {
+ if bucket == it.startBucket && it.wrapped {
+ // end of iteration
+ it.key = nil
+ it.value = nil
+ return
+ }
+ if h.oldbuckets != nil && it.B == h.B {
+ // Iterator was started in the middle of a grow, and the grow isn't done yet.
+ // If the bucket we're looking at hasn't been filled in yet (i.e. the old
+ // bucket hasn't been evacuated) then we need to iterate through the old
+ // bucket and only return the ones that will be migrated to this bucket.
+ oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
+ b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ if !evacuated(b) {
+ checkBucket = bucket
+ } else {
+ b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+ checkBucket = noCheck
+ }
+ } else {
+ b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+ checkBucket = noCheck
+ }
+ bucket++
+ if bucket == uintptr(1)<<it.B {
+ bucket = 0
+ it.wrapped = true
+ }
+ i = 0
+ }
+ for ; i < bucketCnt; i++ {
+ offi := (i + it.offset) & (bucketCnt - 1)
+ k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
+ v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
+ if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
+ if checkBucket != noCheck {
+ // Special case: iterator was started during a grow and the
+ // grow is not done yet. We're working on a bucket whose
+ // oldbucket has not been evacuated yet. Or at least, it wasn't
+ // evacuated when we started the bucket. So we're iterating
+ // through the oldbucket, skipping any keys that will go
+ // to the other new bucket (each oldbucket expands to two
+ // buckets during a grow).
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ if t.reflexivekey || equalfn(k2, k2, uintptr(t.keysize)) {
+ // If the item in the oldbucket is not destined for
+ // the current new bucket in the iteration, skip it.
+ hash := hashfn(k2, uintptr(h.hash0), uintptr(t.keysize))
+ if hash&(uintptr(1)<<it.B-1) != checkBucket {
+ continue
+ }
+ } else {
+ // Hash isn't repeatable if k != k (NaNs). We need a
+ // repeatable and randomish choice of which direction
+ // to send NaNs during evacuation. We'll use the low
+ // bit of tophash to decide which way NaNs go.
+ // NOTE: this case is why we need two evacuate tophash
+ // values, evacuatedX and evacuatedY, that differ in
+ // their low bit.
+ if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
+ continue
+ }
+ }
+ }
+ if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
+	// this is the golden data; we can return it.
+ if t.indirectkey {
+ k = *((*unsafe.Pointer)(k))
+ }
+ it.key = k
+ if t.indirectvalue {
+ v = *((*unsafe.Pointer)(v))
+ }
+ it.value = v
+ } else {
+ // The hash table has grown since the iterator was started.
+ // The golden data for this key is now somewhere else.
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ if t.reflexivekey || equalfn(k2, k2, uintptr(t.keysize)) {
+ // Check the current hash table for the data.
+ // This code handles the case where the key
+ // has been deleted, updated, or deleted and reinserted.
+ // NOTE: we need to regrab the key as it has potentially been
+ // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
+ rk, rv := mapaccessK(t, h, k2)
+ if rk == nil {
+ continue // key has been deleted
+ }
+ it.key = rk
+ it.value = rv
+ } else {
+ // if key!=key then the entry can't be deleted or
+ // updated, so we can just return it. That's lucky for
+ // us because when key!=key we can't look it up
+ // successfully in the current table.
+ it.key = k2
+ if t.indirectvalue {
+ v = *((*unsafe.Pointer)(v))
+ }
+ it.value = v
+ }
+ }
+ it.bucket = bucket
+ if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
+ it.bptr = b
+ }
+ it.i = i + 1
+ it.checkBucket = checkBucket
+ return
+ }
+ }
+ b = b.overflow(t)
+ i = 0
+ goto next
+}
+
+func hashGrow(t *maptype, h *hmap) {
+ if h.oldbuckets != nil {
+ throw("evacuation not done in time")
+ }
+ oldbuckets := h.buckets
+ newbuckets := newarray(t.bucket, 1<<(h.B+1))
+ flags := h.flags &^ (iterator | oldIterator)
+ if h.flags&iterator != 0 {
+ flags |= oldIterator
+ }
+ // commit the grow (atomic wrt gc)
+ h.B++
+ h.flags = flags
+ h.oldbuckets = oldbuckets
+ h.buckets = newbuckets
+ h.nevacuate = 0
+
+ if h.overflow != nil {
+ // Promote current overflow buckets to the old generation.
+ if h.overflow[1] != nil {
+ throw("overflow is not nil")
+ }
+ h.overflow[1] = h.overflow[0]
+ h.overflow[0] = nil
+ }
+
+ // the actual copying of the hash table data is done incrementally
+ // by growWork() and evacuate().
+}
+
+func growWork(t *maptype, h *hmap, bucket uintptr) {
+ noldbuckets := uintptr(1) << (h.B - 1)
+
+ // make sure we evacuate the oldbucket corresponding
+ // to the bucket we're about to use
+ evacuate(t, h, bucket&(noldbuckets-1))
+
+ // evacuate one more oldbucket to make progress on growing
+ if h.oldbuckets != nil {
+ evacuate(t, h, h.nevacuate)
+ }
+}
+
+func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
+ b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ newbit := uintptr(1) << (h.B - 1)
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
+ if !evacuated(b) {
+ // TODO: reuse overflow buckets instead of using new ones, if there
+ // is no iterator using the old buckets. (If !oldIterator.)
+
+ x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+ y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+ xi := 0
+ yi := 0
+ xk := add(unsafe.Pointer(x), dataOffset)
+ yk := add(unsafe.Pointer(y), dataOffset)
+ xv := add(xk, bucketCnt*uintptr(t.keysize))
+ yv := add(yk, bucketCnt*uintptr(t.keysize))
+ for ; b != nil; b = b.overflow(t) {
+ k := add(unsafe.Pointer(b), dataOffset)
+ v := add(k, bucketCnt*uintptr(t.keysize))
+ for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+ top := b.tophash[i]
+ if top == empty {
+ b.tophash[i] = evacuatedEmpty
+ continue
+ }
+ if top < minTopHash {
+ throw("bad map state")
+ }
+ k2 := k
+ if t.indirectkey {
+ k2 = *((*unsafe.Pointer)(k2))
+ }
+ // Compute hash to make our evacuation decision (whether we need
+ // to send this key/value to bucket x or bucket y).
+ hash := hashfn(k2, uintptr(h.hash0), uintptr(t.keysize))
+ if h.flags&iterator != 0 {
+ if !t.reflexivekey && !equalfn(k2, k2, uintptr(t.keysize)) {
+ // If key != key (NaNs), then the hash could be (and probably
+ // will be) entirely different from the old hash. Moreover,
+ // it isn't reproducible. Reproducibility is required in the
+ // presence of iterators, as our evacuation decision must
+ // match whatever decision the iterator made.
+ // Fortunately, we have the freedom to send these keys either
+ // way. Also, tophash is meaningless for these kinds of keys.
+ // We let the low bit of tophash drive the evacuation decision.
+ // We recompute a new random tophash for the next level so
+ // these keys will get evenly distributed across all buckets
+ // after multiple grows.
+ if (top & 1) != 0 {
+ hash |= newbit
+ } else {
+ hash &^= newbit
+ }
+ top = uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ }
+ }
+ if (hash & newbit) == 0 {
+ b.tophash[i] = evacuatedX
+ if xi == bucketCnt {
+ newx := (*bmap)(newobject(t.bucket))
+ h.setoverflow(t, x, newx)
+ x = newx
+ xi = 0
+ xk = add(unsafe.Pointer(x), dataOffset)
+ xv = add(xk, bucketCnt*uintptr(t.keysize))
+ }
+ x.tophash[xi] = top
+ if t.indirectkey {
+ *(*unsafe.Pointer)(xk) = k2 // copy pointer
+ } else {
+ typedmemmove(t.key, xk, k) // copy value
+ }
+ if t.indirectvalue {
+ *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
+ } else {
+ typedmemmove(t.elem, xv, v)
+ }
+ xi++
+ xk = add(xk, uintptr(t.keysize))
+ xv = add(xv, uintptr(t.valuesize))
+ } else {
+ b.tophash[i] = evacuatedY
+ if yi == bucketCnt {
+ newy := (*bmap)(newobject(t.bucket))
+ h.setoverflow(t, y, newy)
+ y = newy
+ yi = 0
+ yk = add(unsafe.Pointer(y), dataOffset)
+ yv = add(yk, bucketCnt*uintptr(t.keysize))
+ }
+ y.tophash[yi] = top
+ if t.indirectkey {
+ *(*unsafe.Pointer)(yk) = k2
+ } else {
+ typedmemmove(t.key, yk, k)
+ }
+ if t.indirectvalue {
+ *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
+ } else {
+ typedmemmove(t.elem, yv, v)
+ }
+ yi++
+ yk = add(yk, uintptr(t.keysize))
+ yv = add(yv, uintptr(t.valuesize))
+ }
+ }
+ }
+ // Unlink the overflow buckets & clear key/value to help GC.
+ if h.flags&oldIterator == 0 {
+ b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+ memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
+ }
+ }
+
+ // Advance evacuation mark
+ if oldbucket == h.nevacuate {
+ h.nevacuate = oldbucket + 1
+ if oldbucket+1 == newbit { // newbit == # of oldbuckets
+ // Growing is all done. Free old main bucket array.
+ h.oldbuckets = nil
+ // Can discard old overflow buckets as well.
+ // If they are still referenced by an iterator,
+	// then the iterator holds a pointer to the slice.
+ if h.overflow != nil {
+ h.overflow[1] = nil
+ }
+ }
+ }
+}
+
+func ismapkey(t *_type) bool {
+ return t.hashfn != nil
+}
+
+// Reflect stubs. Called from ../reflect/asm_*.s
+
+//go:linkname reflect_makemap reflect.makemap
+func reflect_makemap(t *maptype) *hmap {
+ return makemap(t, 0, nil, nil)
+}
+
+//go:linkname reflect_mapaccess reflect.mapaccess
+func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+ val, ok := mapaccess2(t, h, key)
+ if !ok {
+ // reflect wants nil for a missing element
+ val = nil
+ }
+ return val
+}
+
+//go:linkname reflect_mapassign reflect.mapassign
+func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
+ mapassign1(t, h, key, val)
+}
+
+//go:linkname reflect_mapdelete reflect.mapdelete
+func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+ mapdelete(t, h, key)
+}
+
+//go:linkname reflect_mapiterinit reflect.mapiterinit
+func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
+ it := new(hiter)
+ mapiterinit(t, h, it)
+ return it
+}
+
+//go:linkname reflect_mapiternext reflect.mapiternext
+func reflect_mapiternext(it *hiter) {
+ mapiternext(it)
+}
+
+//go:linkname reflect_mapiterkey reflect.mapiterkey
+func reflect_mapiterkey(it *hiter) unsafe.Pointer {
+ return it.key
+}
+
+//go:linkname reflect_maplen reflect.maplen
+func reflect_maplen(h *hmap) int {
+ if h == nil {
+ return 0
+ }
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer( /* &h */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ }
+ return h.count
+}
+
+//go:linkname reflect_ismapkey reflect.ismapkey
+func reflect_ismapkey(t *_type) bool {
+ return ismapkey(t)
+}
+
+const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
+var zeroVal [maxZero]byte
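
The bucket index and tophash used throughout mapassign1, mapdelete, and mapiternext come from opposite ends of the key's hash. Below is a standalone sketch of that split, with bucketCnt, minTopHash, and the pointer size hard-coded for illustration (the runtime derives them from its own definitions), assuming a 64-bit target:

package main

import "fmt"

// Constants chosen for this sketch only.
const (
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits // cells per bucket
	minTopHash    = 4                  // values below this are bucket-state sentinels
	ptrSize       = 8                  // assume a 64-bit target
)

// bucketAndTop splits a hash the same way mapassign1 and mapdelete do:
// the low B bits select the bucket, the top byte becomes the tophash,
// bumped above minTopHash so it never collides with the sentinels
// (empty, evacuatedEmpty, evacuatedX, evacuatedY).
func bucketAndTop(hash uintptr, B uint8) (bucket uintptr, top uint8) {
	bucket = hash & (uintptr(1)<<B - 1)
	top = uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return bucket, top
}

func main() {
	bucket, top := bucketAndTop(0xdeadbeefcafef00d, 5)
	fmt.Printf("bucket %d of %d, tophash %#x\n", bucket, 1<<5, top)
}
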
diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go
new file mode 100644
index 00000000000..4850b168f1c
--- /dev/null
+++ b/libgo/go/runtime/hashmap_fast.go
@@ -0,0 +1,398 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+ if k != key {
+ continue
+ }
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ }
+}
+
+func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+ if k != key {
+ continue
+ }
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ }
+}
+
+func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+ if k != key {
+ continue
+ }
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ }
+}
+
+func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ var b *bmap
+ if h.B == 0 {
+ // One-bucket table. No need to hash.
+ b = (*bmap)(h.buckets)
+ } else {
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+ if k != key {
+ continue
+ }
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ }
+}
+
+func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ key := stringStructOf(&ky)
+ if h.B == 0 {
+ // One-bucket table.
+ b := (*bmap)(h.buckets)
+ if key.len < 32 {
+ // short key, doing lots of comparisons is ok
+ for i := uintptr(0); i < bucketCnt; i++ {
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ // long key, try not to do more comparisons than necessary
+ keymaybe := uintptr(bucketCnt)
+ for i := uintptr(0); i < bucketCnt; i++ {
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str == key.str {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ }
+ // check first 4 bytes
+ // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
+ // four 1-byte comparisons.
+ if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+ continue
+ }
+ // check last 4 bytes
+ if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+ continue
+ }
+ if keymaybe != bucketCnt {
+ // Two keys are potential matches. Use hash to distinguish them.
+ goto dohash
+ }
+ keymaybe = i
+ }
+ if keymaybe != bucketCnt {
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ if memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0])
+ }
+dohash:
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x != top {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ }
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ }
+}
+
+func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+ racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
+ }
+ if h == nil || h.count == 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map read and map write")
+ }
+ key := stringStructOf(&ky)
+ if h.B == 0 {
+ // One-bucket table.
+ b := (*bmap)(h.buckets)
+ if key.len < 32 {
+ // short key, doing lots of comparisons is ok
+ for i := uintptr(0); i < bucketCnt; i++ {
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ // long key, try not to do more comparisons than necessary
+ keymaybe := uintptr(bucketCnt)
+ for i := uintptr(0); i < bucketCnt; i++ {
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x == empty {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str == key.str {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+ }
+ // check first 4 bytes
+ if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+ continue
+ }
+ // check last 4 bytes
+ if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+ continue
+ }
+ if keymaybe != bucketCnt {
+ // Two keys are potential matches. Use hash to distinguish them.
+ goto dohash
+ }
+ keymaybe = i
+ }
+ if keymaybe != bucketCnt {
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ if memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
+ }
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+dohash:
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0), uintptr(t.keysize))
+ m := uintptr(1)<<h.B - 1
+ b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+ if c := h.oldbuckets; c != nil {
+ oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+ if !evacuated(oldb) {
+ b = oldb
+ }
+ }
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ if x != top {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+ }
+ }
+ b = b.overflow(t)
+ if b == nil {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ }
+}
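
The string fast path above filters candidates cheaply (length, pointer identity, first and last four bytes) before committing to a full memequal. A simplified sketch of that filtering order over plain strings, which glosses over the stringStruct and unsafe pointer arithmetic the real code uses:

package main

import "fmt"

// fastStrEqual applies the same cheap filters mapaccess1_faststr uses on
// long keys before paying for a full comparison: length first, then the
// first and last four bytes, and only then the byte-by-byte compare.
func fastStrEqual(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	if len(a) >= 4 {
		if a[:4] != b[:4] {
			return false
		}
		if a[len(a)-4:] != b[len(b)-4:] {
			return false
		}
	}
	return a == b // stands in for memequal over the full key
}

func main() {
	long := "a rather long map key that would take the slow filter path"
	fmt.Println(fastStrEqual(long, long+"!")) // rejected by the length check
	fmt.Println(fastStrEqual(long, long))     // survives the filters; full compare succeeds
}
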
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index 95200a47481..77affdfda12 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -30,13 +30,11 @@ func TestNegativeZero(t *testing.T) {
t.Error("length wrong")
}
- /* gccgo fails this test; this is not required by the spec.
for k := range m {
if math.Copysign(1.0, k) > 0 {
t.Error("wrong sign")
}
}
- */
m = make(map[float64]bool, 0)
m[math.Copysign(0.0, -1.0)] = true
@@ -46,13 +44,11 @@ func TestNegativeZero(t *testing.T) {
t.Error("length wrong")
}
- /* gccgo fails this test; this is not required by the spec.
for k := range m {
if math.Copysign(1.0, k) < 0 {
t.Error("wrong sign")
}
}
- */
}
// nan is a good test because nan != nan, and nan has
@@ -93,7 +89,6 @@ func TestAlias(t *testing.T) {
}
func TestGrowWithNaN(t *testing.T) {
- t.Skip("fails with gccgo")
m := make(map[float64]int, 4)
nan := math.NaN()
m[nan] = 1
@@ -115,7 +110,6 @@ func TestGrowWithNaN(t *testing.T) {
s |= v
}
}
- t.Log("cnt:", cnt, "s:", s)
if cnt != 3 {
t.Error("NaN keys lost during grow")
}
@@ -130,7 +124,6 @@ type FloatInt struct {
}
func TestGrowWithNegativeZero(t *testing.T) {
- t.Skip("fails with gccgo")
negzero := math.Copysign(0.0, -1.0)
m := make(map[FloatInt]int, 4)
m[FloatInt{0.0, 0}] = 1
@@ -407,7 +400,7 @@ func TestMapNanGrowIterator(t *testing.T) {
nan := math.NaN()
const nBuckets = 16
// To fill nBuckets buckets takes LOAD * nBuckets keys.
- nKeys := int(nBuckets * /* *runtime.HashLoad */ 6.5)
+ nKeys := int(nBuckets * *runtime.HashLoad)
// Get map to full point with nan keys.
for i := 0; i < nKeys; i++ {
@@ -439,10 +432,6 @@ func TestMapNanGrowIterator(t *testing.T) {
}
func TestMapIterOrder(t *testing.T) {
- if runtime.Compiler == "gccgo" {
- t.Skip("skipping for gccgo")
- }
-
for _, n := range [...]int{3, 7, 9, 15} {
for i := 0; i < 1000; i++ {
// Make m be {0: true, 1: true, ..., n-1: true}.
@@ -478,9 +467,6 @@ func TestMapIterOrder(t *testing.T) {
func TestMapSparseIterOrder(t *testing.T) {
// Run several rounds to increase the probability
// of failure. One is not enough.
- if runtime.Compiler == "gccgo" {
- t.Skip("skipping for gccgo")
- }
NextRound:
for round := 0; round < 10; round++ {
m := make(map[int]bool)
@@ -514,9 +500,6 @@ NextRound:
}
func TestMapStringBytesLookup(t *testing.T) {
- if runtime.Compiler == "gccgo" {
- t.Skip("skipping for gccgo")
- }
// Use large string keys to avoid small-allocation coalescing,
// which can cause AllocsPerRun to report lower counts than it should.
m := map[string]int{
@@ -532,6 +515,8 @@ func TestMapStringBytesLookup(t *testing.T) {
t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x)
}
+ t.Skip("does not work on gccgo without better escape analysis")
+
var x int
n := testing.AllocsPerRun(100, func() {
x += m[string(buf)]
diff --git a/libgo/go/runtime/msan0.go b/libgo/go/runtime/msan0.go
index 48ae3e4ffdc..117c5e5789c 100644
--- a/libgo/go/runtime/msan0.go
+++ b/libgo/go/runtime/msan0.go
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build ignore
// +build !msan
// Dummy MSan support API, used when not built with -msan.
diff --git a/libgo/go/runtime/race0.go b/libgo/go/runtime/race0.go
new file mode 100644
index 00000000000..f1d37062317
--- /dev/null
+++ b/libgo/go/runtime/race0.go
@@ -0,0 +1,40 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+// Dummy race detection API, used when not built with -race.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const raceenabled = false
+
+// Because raceenabled is false, none of these functions should be called.
+
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceinit() (uintptr, uintptr) { throw("race"); return 0, 0 }
+func racefini() { throw("race") }
+func raceproccreate() uintptr { throw("race"); return 0 }
+func raceprocdestroy(ctx uintptr) { throw("race") }
+func racemapshadow(addr unsafe.Pointer, size uintptr) { throw("race") }
+func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func raceacquire(addr unsafe.Pointer) { throw("race") }
+func raceacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racerelease(addr unsafe.Pointer) { throw("race") }
+func racereleaseg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racereleasemerge(addr unsafe.Pointer) { throw("race") }
+func racereleasemergeg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racefingo() { throw("race") }
+func racemalloc(p unsafe.Pointer, sz uintptr) { throw("race") }
+func racefree(p unsafe.Pointer, sz uintptr) { throw("race") }
+func racegostart(pc uintptr) uintptr { throw("race"); return 0 }
+func racegoend() { throw("race") }
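
Since raceenabled is declared false here, every race guard in the hashmap code becomes a statically dead branch, which is why these stubs can simply throw. A minimal sketch of that pattern (instrumentedRead and its placeholder body are illustrative, not runtime functions):

package main

import "fmt"

// raceenabled mirrors the constant in race0.go; because it is false,
// the guard below is dead code and the stub is never reached.
const raceenabled = false

func racereadpc(addr, callerpc, pc uintptr) { panic("race") }

func instrumentedRead(addr uintptr) int {
	if raceenabled { // constant false: eliminated during compilation
		racereadpc(addr, 0, 0)
	}
	return 0 // placeholder for the actual map read
}

func main() {
	fmt.Println(instrumentedRead(0x1000))
}
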
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
new file mode 100644
index 00000000000..48abbfa889f
--- /dev/null
+++ b/libgo/go/runtime/stubs.go
@@ -0,0 +1,253 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Should be a built-in for unsafe.Pointer?
+//go:nosplit
+func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + x)
+}
+
+// getg returns the pointer to the current g.
+// The compiler rewrites calls to this function into instructions
+// that fetch the g directly (from TLS or from the dedicated register).
+func getg() *g
+
+// mcall switches from the g to the g0 stack and invokes fn(g),
+// where g is the goroutine that made the call.
+// mcall saves g's current PC/SP in g->sched so that it can be restored later.
+// It is up to fn to arrange for that later execution, typically by recording
+// g in a data structure, causing something to call ready(g) later.
+// mcall returns to the original goroutine g later, when g has been rescheduled.
+// fn must not return at all; typically it ends by calling schedule, to let the m
+// run other goroutines.
+//
+// mcall can only be called from g stacks (not g0, not gsignal).
+//
+// This must NOT be go:noescape: if fn is a stack-allocated closure,
+// fn puts g on a run queue, and g executes before fn returns, the
+// closure will be invalidated while it is still executing.
+func mcall(fn func(*g))
+
+// systemstack runs fn on a system stack.
+// If systemstack is called from the per-OS-thread (g0) stack, or
+// if systemstack is called from the signal handling (gsignal) stack,
+// systemstack calls fn directly and returns.
+// Otherwise, systemstack is being called from the limited stack
+// of an ordinary goroutine. In this case, systemstack switches
+// to the per-OS-thread stack, calls fn, and switches back.
+// It is common to use a func literal as the argument, in order
+// to share inputs and outputs with the code around the call
+// to systemstack:
+//
+// ... set up y ...
+// systemstack(func() {
+// x = bigcall(y)
+// })
+// ... use x ...
+//
+//go:noescape
+func systemstack(fn func())
+
+func badsystemstack() {
+ throw("systemstack called from unexpected goroutine")
+}
+
+// memclr clears n bytes starting at ptr.
+// in memclr_*.s
+//go:noescape
+func memclr(ptr unsafe.Pointer, n uintptr)
+
+//go:linkname reflect_memclr reflect.memclr
+func reflect_memclr(ptr unsafe.Pointer, n uintptr) {
+ memclr(ptr, n)
+}
+
+// memmove copies n bytes from "from" to "to".
+// in memmove_*.s
+//go:noescape
+func memmove(to, from unsafe.Pointer, n uintptr)
+
+//go:linkname reflect_memmove reflect.memmove
+func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
+ memmove(to, from, n)
+}
+
+// exported value for testing
+var hashLoad = loadFactor
+
+// in asm_*.s
+func fastrand1() uint32
+
+// in asm_*.s
+//go:noescape
+func memequal(a, b unsafe.Pointer, size uintptr) bool
+
+// noescape hides a pointer from escape analysis. noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input. noescape is inlined and currently
+// compiles down to a single xor instruction.
+// USE CAREFULLY!
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+ x := uintptr(p)
+ return unsafe.Pointer(x ^ 0)
+}
+
+func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
+
+//go:noescape
+func jmpdefer(fv *funcval, argp uintptr)
+func exit1(code int32)
+func asminit()
+func setg(gg *g)
+func breakpoint()
+
+// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
+// After fn returns, reflectcall copies n-retoffset result bytes
+// back into arg+retoffset before returning. If copying result bytes back,
+// the caller should pass the argument frame type as argtype, so that
+// call can execute appropriate write barriers during the copy.
+// Package reflect passes a frame type. In package runtime, there is only
+// one call that copies results back, in cgocallbackg1, and it does NOT pass a
+// frame type, meaning there are no write barriers invoked. See that call
+// site for justification.
+func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
+
+func procyield(cycles uint32)
+
+type neverCallThisFunction struct{}
+
+// goexit is the return stub at the top of every goroutine call stack.
+// Each goroutine stack is constructed as if goexit called the
+// goroutine's entry point function, so that when the entry point
+// function returns, it will return to goexit, which will call goexit1
+// to perform the actual exit.
+//
+// This function must never be called directly. Call goexit1 instead.
+// gentraceback assumes that goexit terminates the stack. A direct
+// call on the stack will cause gentraceback to stop walking the stack
+// prematurely and if there are leftover stack barriers it may panic.
+func goexit(neverCallThisFunction)
+
+// publicationBarrier performs a store/store barrier (a "publication"
+// or "export" barrier). Some form of synchronization is required
+// between initializing an object and making that object accessible to
+// another processor. Without synchronization, the initialization
+// writes and the "publication" write may be reordered, allowing the
+// other processor to follow the pointer and observe an uninitialized
+// object. In general, higher-level synchronization should be used,
+// such as locking or an atomic pointer write. publicationBarrier is
+// for when those aren't an option, such as in the implementation of
+// the memory manager.
+//
+// There's no corresponding barrier for the read side because the read
+// side naturally has a data dependency order. All architectures that
+// Go supports or seems likely to ever support automatically enforce
+// data dependency ordering.
+func publicationBarrier()
+
+//go:noescape
+func setcallerpc(argp unsafe.Pointer, pc uintptr)
+
+// getcallerpc returns the program counter (PC) of its caller's caller.
+// getcallersp returns the stack pointer (SP) of its caller's caller.
+// For both, the argp must be a pointer to the caller's first function argument.
+// The implementation may or may not use argp, depending on
+// the architecture.
+//
+// For example:
+//
+// func f(arg1, arg2, arg3 int) {
+// pc := getcallerpc(unsafe.Pointer(&arg1))
+// sp := getcallersp(unsafe.Pointer(&arg1))
+// }
+//
+// These two lines find the PC and SP immediately following
+// the call to f (where f will return).
+//
+// The call to getcallerpc and getcallersp must be done in the
+// frame being asked about. It would not be correct for f to pass &arg1
+// to another function g and let g call getcallerpc/getcallersp.
+// The call inside g might return information about g's caller or
+// information about f's caller or complete garbage.
+//
+// The result of getcallersp is correct at the time of the return,
+// but it may be invalidated by any subsequent call to a function
+// that might relocate the stack in order to grow or shrink it.
+// A general rule is that the result of getcallersp should be used
+// immediately and can only be passed to nosplit functions.
+
+//go:noescape
+func getcallerpc(argp unsafe.Pointer) uintptr
+
+//go:noescape
+func getcallersp(argp unsafe.Pointer) uintptr
+
+// argp used in Defer structs when there is no argp.
+const _NoArgs = ^uintptr(0)
+
+// //go:linkname time_now time.now
+// func time_now() (sec int64, nsec int32)
+
+/*
+func unixnanotime() int64 {
+ sec, nsec := time_now()
+ return sec*1e9 + int64(nsec)
+}
+*/
+
+// round n up to a multiple of a. a must be a power of 2.
+func round(n, a uintptr) uintptr {
+ return (n + a - 1) &^ (a - 1)
+}
+
+/*
+// checkASM returns whether assembly runtime checks have passed.
+func checkASM() bool
+*/
+
+// throw crashes the program.
+// For gccgo unless and until we port panic.go.
+func throw(string)
+
+// newobject allocates a new object.
+// For gccgo unless and until we port malloc.go.
+func newobject(*_type) unsafe.Pointer
+
+// newarray allocates a new array of objects.
+// For gccgo unless and until we port malloc.go.
+func newarray(*_type, int) unsafe.Pointer
+
+// funcPC returns the entry PC of the function f.
+// It assumes that f is a func value. Otherwise the behavior is undefined.
+// For gccgo here unless and until we port proc.go.
+//go:nosplit
+func funcPC(f interface{}) uintptr {
+ return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
+}
+
+// typedmemmove copies a typed value.
+// For gccgo for now.
+//go:nosplit
+func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
+ memmove(dst, src, typ.size)
+}
+
+// Here for gccgo unless and until we port string.go.
+type stringStruct struct {
+ str unsafe.Pointer
+ len int
+}
+
+// Here for gccgo unless and until we port string.go.
+func stringStructOf(sp *string) *stringStruct {
+ return (*stringStruct)(unsafe.Pointer(sp))
+}
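
The fast access paths pass noescape(unsafe.Pointer(&key)) to the hash function so that a stack-resident key is not forced onto the heap. A sketch of that usage; hashStub is a hypothetical stand-in for t.key.hashfn, not a runtime function:

package main

import (
	"fmt"
	"unsafe"
)

// noescape is copied from the stubs above: the identity function, with a
// uintptr round-trip that keeps escape analysis from deciding the input
// pointer escapes through the return value.
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// hashStub reads through the pointer but never retains it, matching how
// the fast paths use the real hash function.
func hashStub(p unsafe.Pointer, seed, size uintptr) uintptr {
	return uintptr(*(*uint32)(p))*0x9e3779b1 ^ seed ^ size
}

func main() {
	key := uint32(42) // can stay on the stack: its address is laundered below
	fmt.Printf("%#x\n", hashStub(noescape(unsafe.Pointer(&key)), 1, 4))
}
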
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index fb5f034dd68..d9b0b5590d2 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -16,12 +16,12 @@ type _type struct {
size uintptr
hash uint32
- hashfn func(unsafe.Pointer, uintptr) uintptr
+ hashfn func(unsafe.Pointer, uintptr, uintptr) uintptr
equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
gc unsafe.Pointer
string *string
- *uncommonType
+ *uncommontype
ptrToThis *_type
}
@@ -33,7 +33,7 @@ type method struct {
tfn unsafe.Pointer
}
-type uncommonType struct {
+type uncommontype struct {
name *string
pkgPath *string
methods []method
@@ -45,25 +45,34 @@ type imethod struct {
typ *_type
}
-type interfaceType struct {
+type interfacetype struct {
typ _type
methods []imethod
}
-type mapType struct {
- typ _type
- key *_type
- elem *_type
+type maptype struct {
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal type representing a hash bucket
+ hmap *_type // internal type representing a hmap
+ keysize uint8 // size of key slot
+ indirectkey bool // store ptr to key instead of key itself
+ valuesize uint8 // size of value slot
+ indirectvalue bool // store ptr to value instead of value itself
+ bucketsize uint16 // size of bucket
+ reflexivekey bool // true if k==k for all keys
+ needkeyupdate bool // true if we need to update key on an overwrite
}
-type arrayType struct {
+type arraytype struct {
typ _type
elem *_type
slice *_type
len uintptr
}
-type chanType struct {
+type chantype struct {
typ _type
elem *_type
dir uintptr
diff --git a/libgo/go/runtime/typekind.go b/libgo/go/runtime/typekind.go
new file mode 100644
index 00000000000..abb27777fe9
--- /dev/null
+++ b/libgo/go/runtime/typekind.go
@@ -0,0 +1,44 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ kindBool = 1 + iota
+ kindInt
+ kindInt8
+ kindInt16
+ kindInt32
+ kindInt64
+ kindUint
+ kindUint8
+ kindUint16
+ kindUint32
+ kindUint64
+ kindUintptr
+ kindFloat32
+ kindFloat64
+ kindComplex64
+ kindComplex128
+ kindArray
+ kindChan
+ kindFunc
+ kindInterface
+ kindMap
+ kindPtr
+ kindSlice
+ kindString
+ kindStruct
+ kindUnsafePointer
+
+ kindDirectIface = 1 << 5
+ kindGCProg = 1 << 6
+ kindNoPointers = 1 << 7
+ kindMask = (1 << 5) - 1
+)
+
+// isDirectIface reports whether t is stored directly in an interface value.
+func isDirectIface(t *_type) bool {
+ return t.kind&kindDirectIface != 0
+}
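
The kind byte packs the kind enumeration in its low five bits with three flag bits above it, which is what isDirectIface and the kindNoPointers test in mapiterinit rely on. A small decoding sketch using the constants above (kindPtr written out explicitly here):

package main

import "fmt"

const (
	kindPtr         = 22 // position of kindPtr in the enumeration above
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1
)

// describe splits a packed kind byte: low five bits are the kind,
// the three bits above are independent flags.
func describe(kind uint8) {
	fmt.Printf("kind=%d directIface=%t gcprog=%t noPointers=%t\n",
		kind&kindMask,
		kind&kindDirectIface != 0,
		kind&kindGCProg != 0,
		kind&kindNoPointers != 0)
}

func main() {
	describe(kindPtr | kindDirectIface)
}
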
diff --git a/libgo/runtime/chan.goc b/libgo/runtime/chan.goc
index 44402d4481e..6e4c8fd8920 100644
--- a/libgo/runtime/chan.goc
+++ b/libgo/runtime/chan.goc
@@ -1064,12 +1064,6 @@ func reflect.chanlen(c *Hchan) (len int) {
len = c->qcount;
}
-intgo
-__go_chan_len(Hchan *c)
-{
- return reflect_chanlen(c);
-}
-
func reflect.chancap(c *Hchan) (cap int) {
if(c == nil)
cap = 0;
diff --git a/libgo/runtime/go-construct-map.c b/libgo/runtime/go-construct-map.c
index 4bd79d20058..c1a8bb72efa 100644
--- a/libgo/runtime/go-construct-map.c
+++ b/libgo/runtime/go-construct-map.c
@@ -9,25 +9,30 @@
#include <stdlib.h>
#include "runtime.h"
-#include "map.h"
-struct __go_map *
-__go_construct_map (const struct __go_map_descriptor *descriptor,
+extern void *makemap (const struct __go_map_type *, int64_t hint,
+ void *, void *)
+ __asm__ (GOSYM_PREFIX "runtime.makemap");
+
+extern void mapassign1 (const struct __go_map_type *, void *hmap,
+ const void *key, const void *val)
+ __asm__ (GOSYM_PREFIX "runtime.mapassign1");
+
+void *
+__go_construct_map (const struct __go_map_type *type,
uintptr_t count, uintptr_t entry_size,
- uintptr_t val_offset, uintptr_t val_size,
- const void *ventries)
+ uintptr_t val_offset, const void *ventries)
{
- struct __go_map *ret;
+ void *ret;
const unsigned char *entries;
uintptr_t i;
- ret = __go_new_map (descriptor, count);
+ ret = makemap(type, (int64_t) count, NULL, NULL);
entries = (const unsigned char *) ventries;
for (i = 0; i < count; ++i)
{
- void *val = __go_map_index (ret, entries, 1);
- __builtin_memcpy (val, entries + val_offset, val_size);
+ mapassign1 (type, ret, entries, entries + val_offset);
entries += entry_size;
}
diff --git a/libgo/runtime/go-eface-compare.c b/libgo/runtime/go-eface-compare.c
index 40b716eb4af..62302b5ebe2 100644
--- a/libgo/runtime/go-eface-compare.c
+++ b/libgo/runtime/go-eface-compare.c
@@ -26,6 +26,8 @@ __go_empty_interface_compare (struct __go_empty_interface left,
if (!__go_type_descriptors_equal (left_descriptor,
right.__type_descriptor))
return 1;
+ if (left_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (left_descriptor))
return left.__object == right.__object ? 0 : 1;
if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
diff --git a/libgo/runtime/go-eface-val-compare.c b/libgo/runtime/go-eface-val-compare.c
index e810750d5db..839d1891623 100644
--- a/libgo/runtime/go-eface-val-compare.c
+++ b/libgo/runtime/go-eface-val-compare.c
@@ -24,6 +24,8 @@ __go_empty_interface_value_compare (
return 1;
if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
return 1;
+ if (left_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (left_descriptor))
return left.__object == val ? 0 : 1;
if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object, val,
diff --git a/libgo/runtime/go-fieldtrack.c b/libgo/runtime/go-fieldtrack.c
index a7e2c133440..2b3ac325c10 100644
--- a/libgo/runtime/go-fieldtrack.c
+++ b/libgo/runtime/go-fieldtrack.c
@@ -6,7 +6,6 @@
#include "runtime.h"
#include "go-type.h"
-#include "map.h"
/* The compiler will track fields that have the tag go:"track". Any
function that refers to such a field will call this function with a
@@ -34,16 +33,26 @@ extern const char _edata[] __attribute__ ((weak));
extern const char __edata[] __attribute__ ((weak));
extern const char __bss_start[] __attribute__ ((weak));
-void runtime_Fieldtrack (struct __go_map *) __asm__ (GOSYM_PREFIX "runtime.Fieldtrack");
+extern void mapassign1 (const struct __go_map_type *, void *hmap,
+ const void *key, const void *val)
+ __asm__ (GOSYM_PREFIX "runtime.mapassign1");
+
+/* The type descriptor for map[string] bool.  */
+extern const char __go_td_MN6_string__N4_bool[] __attribute__ ((weak));
+
+void runtime_Fieldtrack (void *) __asm__ (GOSYM_PREFIX "runtime.Fieldtrack");
void
-runtime_Fieldtrack (struct __go_map *m)
+runtime_Fieldtrack (void *m)
{
const char *p;
const char *pend;
const char *prefix;
size_t prefix_len;
+ if (__go_td_MN6_string__N4_bool == NULL)
+ return;
+
p = __data_start;
if (p == NULL)
p = __etext;
@@ -86,14 +95,12 @@ runtime_Fieldtrack (struct __go_map *m)
if (__builtin_memchr (q1, '\0', q2 - q1) == NULL)
{
String s;
- void *v;
- _Bool *pb;
+ _Bool b;
s.str = (const byte *) q1;
s.len = q2 - q1;
- v = __go_map_index (m, &s, 1);
- pb = (_Bool *) v;
- *pb = 1;
+ b = 1;
+ mapassign1((const void*) __go_td_MN6_string__N4_bool, m, &s, &b);
}
p = q2;
diff --git a/libgo/runtime/go-interface-compare.c b/libgo/runtime/go-interface-compare.c
index 1d367753a1e..14999df1dd1 100644
--- a/libgo/runtime/go-interface-compare.c
+++ b/libgo/runtime/go-interface-compare.c
@@ -26,6 +26,8 @@ __go_interface_compare (struct __go_interface left,
left_descriptor = left.__methods[0];
if (!__go_type_descriptors_equal (left_descriptor, right.__methods[0]))
return 1;
+ if (left_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (left_descriptor))
return left.__object == right.__object ? 0 : 1;
if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
diff --git a/libgo/runtime/go-interface-eface-compare.c b/libgo/runtime/go-interface-eface-compare.c
index d1e6fd084d2..4c47b7cf04d 100644
--- a/libgo/runtime/go-interface-eface-compare.c
+++ b/libgo/runtime/go-interface-eface-compare.c
@@ -25,6 +25,8 @@ __go_interface_empty_compare (struct __go_interface left,
left_descriptor = left.__methods[0];
if (!__go_type_descriptors_equal (left_descriptor, right.__type_descriptor))
return 1;
+ if (left_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (left_descriptor))
return left.__object == right.__object ? 0 : 1;
if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
diff --git a/libgo/runtime/go-interface-val-compare.c b/libgo/runtime/go-interface-val-compare.c
index 36b6efdc9f1..5dc91d0330f 100644
--- a/libgo/runtime/go-interface-val-compare.c
+++ b/libgo/runtime/go-interface-val-compare.c
@@ -24,6 +24,8 @@ __go_interface_value_compare (
left_descriptor = left.__methods[0];
if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
return 1;
+ if (left_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (left_descriptor))
return left.__object == val ? 0 : 1;
if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object, val,
diff --git a/libgo/runtime/go-map-delete.c b/libgo/runtime/go-map-delete.c
deleted file mode 100644
index fb7c331856e..00000000000
--- a/libgo/runtime/go-map-delete.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* go-map-delete.c -- delete an entry from a map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Delete the entry matching KEY from MAP. */
-
-void
-__go_map_delete (struct __go_map *map, const void *key)
-{
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- uintptr_t key_offset;
- const FuncVal *equalfn;
- size_t key_hash;
- size_t key_size;
- size_t bucket_index;
- void **pentry;
-
- if (map == NULL)
- return;
-
- descriptor = map->__descriptor;
-
- key_descriptor = descriptor->__map_descriptor->__key_type;
- key_offset = descriptor->__key_offset;
- key_size = key_descriptor->__size;
- if (key_size == 0)
- return;
-
- __go_assert (key_size != -1UL);
- equalfn = key_descriptor->__equalfn;
-
- key_hash = __go_call_hashfn (key_descriptor->__hashfn, key, key_size);
- bucket_index = key_hash % map->__bucket_count;
-
- pentry = map->__buckets + bucket_index;
- while (*pentry != NULL)
- {
- char *entry = (char *) *pentry;
- if (__go_call_equalfn (equalfn, key, entry + key_offset, key_size))
- {
- *pentry = *(void **) entry;
- if (descriptor->__entry_size >= TinySize)
- __go_free (entry);
- map->__element_count -= 1;
- break;
- }
- pentry = (void **) entry;
- }
-}
diff --git a/libgo/runtime/go-map-index.c b/libgo/runtime/go-map-index.c
deleted file mode 100644
index 353041db6c4..00000000000
--- a/libgo/runtime/go-map-index.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/* go-map-index.c -- find or insert an entry in a map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Rehash MAP to a larger size. */
-
-static void
-__go_map_rehash (struct __go_map *map)
-{
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- uintptr_t key_offset;
- size_t key_size;
- const FuncVal *hashfn;
- uintptr_t old_bucket_count;
- void **old_buckets;
- uintptr_t new_bucket_count;
- void **new_buckets;
- uintptr_t i;
-
- descriptor = map->__descriptor;
-
- key_descriptor = descriptor->__map_descriptor->__key_type;
- key_offset = descriptor->__key_offset;
- key_size = key_descriptor->__size;
- hashfn = key_descriptor->__hashfn;
-
- old_bucket_count = map->__bucket_count;
- old_buckets = map->__buckets;
-
- new_bucket_count = __go_map_next_prime (old_bucket_count * 2);
- new_buckets = (void **) __go_alloc (new_bucket_count * sizeof (void *));
- __builtin_memset (new_buckets, 0, new_bucket_count * sizeof (void *));
-
- for (i = 0; i < old_bucket_count; ++i)
- {
- char* entry;
- char* next;
-
- for (entry = old_buckets[i]; entry != NULL; entry = next)
- {
- size_t key_hash;
- size_t new_bucket_index;
-
- /* We could speed up rehashing at the cost of memory space
- by caching the hash code. */
- key_hash = __go_call_hashfn (hashfn, entry + key_offset, key_size);
- new_bucket_index = key_hash % new_bucket_count;
-
- next = *(char **) entry;
- *(char **) entry = new_buckets[new_bucket_index];
- new_buckets[new_bucket_index] = entry;
- }
- }
-
- if (old_bucket_count * sizeof (void *) >= TinySize)
- __go_free (old_buckets);
-
- map->__bucket_count = new_bucket_count;
- map->__buckets = new_buckets;
-}
-
-/* Find KEY in MAP, return a pointer to the value. If KEY is not
- present, then if INSERT is false, return NULL, and if INSERT is
- true, insert a new value and zero-initialize it before returning a
- pointer to it. */
-
-void *
-__go_map_index (struct __go_map *map, const void *key, _Bool insert)
-{
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- uintptr_t key_offset;
- const FuncVal *equalfn;
- size_t key_hash;
- size_t key_size;
- size_t bucket_index;
- char *entry;
-
- if (map == NULL)
- {
- if (insert)
- runtime_panicstring ("assignment to entry in nil map");
- return NULL;
- }
-
- descriptor = map->__descriptor;
-
- key_descriptor = descriptor->__map_descriptor->__key_type;
- key_offset = descriptor->__key_offset;
- key_size = key_descriptor->__size;
- __go_assert (key_size != -1UL);
- equalfn = key_descriptor->__equalfn;
-
- key_hash = __go_call_hashfn (key_descriptor->__hashfn, key, key_size);
- bucket_index = key_hash % map->__bucket_count;
-
- entry = (char *) map->__buckets[bucket_index];
- while (entry != NULL)
- {
- if (__go_call_equalfn (equalfn, key, entry + key_offset, key_size))
- return entry + descriptor->__val_offset;
- entry = *(char **) entry;
- }
-
- if (!insert)
- return NULL;
-
- if (map->__element_count >= map->__bucket_count)
- {
- __go_map_rehash (map);
- bucket_index = key_hash % map->__bucket_count;
- }
-
- entry = (char *) __go_alloc (descriptor->__entry_size);
- __builtin_memset (entry, 0, descriptor->__entry_size);
-
- __builtin_memcpy (entry + key_offset, key, key_size);
-
- *(char **) entry = map->__buckets[bucket_index];
- map->__buckets[bucket_index] = entry;
-
- map->__element_count += 1;
-
- return entry + descriptor->__val_offset;
-}
diff --git a/libgo/runtime/go-map-len.c b/libgo/runtime/go-map-len.c
deleted file mode 100644
index 7da10c24943..00000000000
--- a/libgo/runtime/go-map-len.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/* go-map-len.c -- return the length of a map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Return the length of a map. This could be done inline, of course,
- but I'm doing it as a function for now to make it easy to change
- the map structure. */
-
-intgo
-__go_map_len (struct __go_map *map)
-{
- if (map == NULL)
- return 0;
- __go_assert (map->__element_count
- == (uintptr_t) (intgo) map->__element_count);
- return map->__element_count;
-}
diff --git a/libgo/runtime/go-map-range.c b/libgo/runtime/go-map-range.c
deleted file mode 100644
index 5dbb92ccb89..00000000000
--- a/libgo/runtime/go-map-range.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* go-map-range.c -- implement a range clause over a map.
-
- Copyright 2009, 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Initialize a range over a map. */
-
-void
-__go_mapiterinit (const struct __go_map *h, struct __go_hash_iter *it)
-{
- it->entry = NULL;
- if (h != NULL)
- {
- it->map = h;
- it->next_entry = NULL;
- it->bucket = 0;
- --it->bucket;
- __go_mapiternext(it);
- }
-}
-
-/* Move to the next iteration, updating *HITER. */
-
-void
-__go_mapiternext (struct __go_hash_iter *it)
-{
- const void *entry;
-
- entry = it->next_entry;
- if (entry == NULL)
- {
- const struct __go_map *map;
- uintptr_t bucket;
-
- map = it->map;
- bucket = it->bucket;
- while (1)
- {
- ++bucket;
- if (bucket >= map->__bucket_count)
- {
- /* Map iteration is complete. */
- it->entry = NULL;
- return;
- }
- entry = map->__buckets[bucket];
- if (entry != NULL)
- break;
- }
- it->bucket = bucket;
- }
- it->entry = entry;
- it->next_entry = *(const void * const *) entry;
-}
-
-/* Get the key of the current iteration. */
-
-void
-__go_mapiter1 (struct __go_hash_iter *it, unsigned char *key)
-{
- const struct __go_map *map;
- const struct __go_map_descriptor *descriptor;
- const struct __go_type_descriptor *key_descriptor;
- const char *p;
-
- map = it->map;
- descriptor = map->__descriptor;
- key_descriptor = descriptor->__map_descriptor->__key_type;
- p = it->entry;
- __go_assert (p != NULL);
- __builtin_memcpy (key, p + descriptor->__key_offset, key_descriptor->__size);
-}
-
-/* Get the key and value of the current iteration. */
-
-void
-__go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
- unsigned char *val)
-{
- const struct __go_map *map;
- const struct __go_map_descriptor *descriptor;
- const struct __go_map_type *map_descriptor;
- const struct __go_type_descriptor *key_descriptor;
- const struct __go_type_descriptor *val_descriptor;
- const char *p;
-
- map = it->map;
- descriptor = map->__descriptor;
- map_descriptor = descriptor->__map_descriptor;
- key_descriptor = map_descriptor->__key_type;
- val_descriptor = map_descriptor->__val_type;
- p = it->entry;
- __go_assert (p != NULL);
- __builtin_memcpy (key, p + descriptor->__key_offset,
- key_descriptor->__size);
- __builtin_memcpy (val, p + descriptor->__val_offset,
- val_descriptor->__size);
-}
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c
new file mode 100644
index 00000000000..de6f39a6b1e
--- /dev/null
+++ b/libgo/runtime/go-memclr.c
@@ -0,0 +1,16 @@
+/* go-memclr.c -- clear a memory buffer
+
+ Copyright 2016 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "runtime.h"
+
+void memclr(void *, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.memclr");
+
+void
+memclr (void *p1, uintptr len)
+{
+ __builtin_memset (p1, 0, len);
+}
diff --git a/libgo/runtime/go-memequal.c b/libgo/runtime/go-memequal.c
new file mode 100644
index 00000000000..5f514aaae09
--- /dev/null
+++ b/libgo/runtime/go-memequal.c
@@ -0,0 +1,16 @@
+/* go-memequal.c -- compare memory buffers for equality
+
+ Copyright 2016 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "runtime.h"
+
+_Bool memequal (void *, void *, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.memequal");
+
+_Bool
+memequal (void *p1, void *p2, uintptr len)
+{
+ return __builtin_memcmp (p1, p2, len) == 0;
+}
diff --git a/libgo/runtime/go-memmove.c b/libgo/runtime/go-memmove.c
new file mode 100644
index 00000000000..a6fda08c47d
--- /dev/null
+++ b/libgo/runtime/go-memmove.c
@@ -0,0 +1,16 @@
+/* go-memmove.c -- move one memory buffer to another
+
+ Copyright 2016 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "runtime.h"
+
+void move(void *, void *, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.memmove");
+
+void
+move (void *p1, void *p2, uintptr len)
+{
+ __builtin_memmove (p1, p2, len);
+}
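
The three files above add thin wrappers over compiler builtins, each given a Go runtime symbol name through the GOSYM_PREFIX assembler alias shown. A hedged sketch of how such helpers combine when copying an entry between two fixed-size slots (`dst`, `src` and `size` are hypothetical):

    if (!memequal (dst, src, size))   /* slots differ, so ...       */
      move (dst, src, size);          /* ... copy src over dst ...  */
    memclr (src, size);               /* ... and clear the old slot */
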
diff --git a/libgo/runtime/go-new-map.c b/libgo/runtime/go-new-map.c
deleted file mode 100644
index c289bc0bea0..00000000000
--- a/libgo/runtime/go-new-map.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/* go-new-map.c -- allocate a new map.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "map.h"
-
-/* List of prime numbers, copied from libstdc++/src/hashtable.c. */
-
-static const unsigned long prime_list[] = /* 256 + 1 or 256 + 48 + 1 */
-{
- 2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
- 37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
- 83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
- 157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
- 277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
- 503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
- 953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
- 1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
- 3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
- 5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
- 11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
- 19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
- 33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
- 57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
- 99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
- 159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
- 256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
- 410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
- 658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
- 1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
- 1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
- 2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
- 4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
- 6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
- 11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
- 16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
- 24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
- 36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
- 54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
- 80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
- 118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
- 176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
- 260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
- 386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
- 573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
- 849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
- 1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul, 1725587117ul,
- 1866894511ul, 2019773507ul, 2185171673ul, 2364114217ul, 2557710269ul,
- 2767159799ul, 2993761039ul, 3238918481ul, 3504151727ul, 3791104843ul,
- 4101556399ul, 4294967291ul,
-#if __SIZEOF_LONG__ >= 8
- 6442450933ul, 8589934583ul, 12884901857ul, 17179869143ul,
- 25769803693ul, 34359738337ul, 51539607367ul, 68719476731ul,
- 103079215087ul, 137438953447ul, 206158430123ul, 274877906899ul,
- 412316860387ul, 549755813881ul, 824633720731ul, 1099511627689ul,
- 1649267441579ul, 2199023255531ul, 3298534883309ul, 4398046511093ul,
- 6597069766607ul, 8796093022151ul, 13194139533241ul, 17592186044399ul,
- 26388279066581ul, 35184372088777ul, 52776558133177ul, 70368744177643ul,
- 105553116266399ul, 140737488355213ul, 211106232532861ul, 281474976710597ul,
- 562949953421231ul, 1125899906842597ul, 2251799813685119ul,
- 4503599627370449ul, 9007199254740881ul, 18014398509481951ul,
- 36028797018963913ul, 72057594037927931ul, 144115188075855859ul,
- 288230376151711717ul, 576460752303423433ul,
- 1152921504606846883ul, 2305843009213693951ul,
- 4611686018427387847ul, 9223372036854775783ul,
- 18446744073709551557ul
-#endif
-};
-
-/* Return the next number from PRIME_LIST >= N. */
-
-uintptr_t
-__go_map_next_prime (uintptr_t n)
-{
- size_t low;
- size_t high;
-
- low = 0;
- high = sizeof prime_list / sizeof prime_list[0];
- while (low < high)
- {
- size_t mid;
-
- mid = (low + high) / 2;
-
- /* Here LOW <= MID < HIGH. */
-
- if (prime_list[mid] < n)
- low = mid + 1;
- else if (prime_list[mid] > n)
- high = mid;
- else
- return n;
- }
- if (low >= sizeof prime_list / sizeof prime_list[0])
- return n;
- return prime_list[low];
-}
-
-/* Allocate a new map. */
-
-struct __go_map *
-__go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries)
-{
- int32 ientries;
- struct __go_map *ret;
-
- /* The master library limits map entries to int32, so we do too. */
- ientries = (int32) entries;
- if (ientries < 0 || (uintptr_t) ientries != entries)
- runtime_panicstring ("map size out of range");
-
- if (entries == 0)
- entries = 5;
- else
- entries = __go_map_next_prime (entries);
- ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
- ret->__descriptor = descriptor;
- ret->__element_count = 0;
- ret->__bucket_count = entries;
- ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
- __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));
- return ret;
-}
-
-/* Allocate a new map when the argument to make is a large type. */
-
-struct __go_map *
-__go_new_map_big (const struct __go_map_descriptor *descriptor,
- uint64_t entries)
-{
- uintptr_t sentries;
-
- sentries = (uintptr_t) entries;
- if ((uint64_t) sentries != entries)
- runtime_panicstring ("map size out of range");
- return __go_new_map (descriptor, sentries);
-}
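
For reference, the removed allocator sized its bucket array by rounding the requested entry count up to the next prime in the table above. A worked example of that path, following the body of __go_new_map:

    uintptr_t buckets = __go_map_next_prime (100);            /* == 103 */
    void **array = (void **) __go_alloc (buckets * sizeof (void *));
    __builtin_memset (array, 0, buckets * sizeof (void *));
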
diff --git a/libgo/runtime/go-reflect-map.c b/libgo/runtime/go-reflect-map.c
deleted file mode 100644
index 36f31025d30..00000000000
--- a/libgo/runtime/go-reflect-map.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/* go-reflect-map.c -- map reflection support for Go.
-
- Copyright 2009, 2010 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-type.h"
-#include "map.h"
-
-/* This file implements support for reflection on maps. These
- functions are called from reflect/value.go. */
-
-extern void *mapaccess (struct __go_map_type *, void *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapaccess");
-
-void *
-mapaccess (struct __go_map_type *mt, void *m, void *key)
-{
- struct __go_map *map = (struct __go_map *) m;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- if (map == NULL)
- return NULL;
- else
- return __go_map_index (map, key, 0);
-}
-
-extern void mapassign (struct __go_map_type *, void *, void *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapassign");
-
-void
-mapassign (struct __go_map_type *mt, void *m, void *key, void *val)
-{
- struct __go_map *map = (struct __go_map *) m;
- void *p;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- if (map == NULL)
- runtime_panicstring ("assignment to entry in nil map");
- p = __go_map_index (map, key, 1);
- __builtin_memcpy (p, val, mt->__val_type->__size);
-}
-
-extern void mapdelete (struct __go_map_type *, void *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapdelete");
-
-void
-mapdelete (struct __go_map_type *mt, void *m, void *key)
-{
- struct __go_map *map = (struct __go_map *) m;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- if (map == NULL)
- return;
- __go_map_delete (map, key);
-}
-
-extern int32_t maplen (void *) __asm__ (GOSYM_PREFIX "reflect.maplen");
-
-int32_t
-maplen (void *m)
-{
- struct __go_map *map = (struct __go_map *) m;
-
- if (map == NULL)
- return 0;
- return (int32_t) map->__element_count;
-}
-
-extern unsigned char *mapiterinit (struct __go_map_type *, void *)
- __asm__ (GOSYM_PREFIX "reflect.mapiterinit");
-
-unsigned char *
-mapiterinit (struct __go_map_type *mt, void *m)
-{
- struct __go_hash_iter *it;
-
- __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
- it = __go_alloc (sizeof (struct __go_hash_iter));
- __go_mapiterinit ((struct __go_map *) m, it);
- return (unsigned char *) it;
-}
-
-extern void mapiternext (void *) __asm__ (GOSYM_PREFIX "reflect.mapiternext");
-
-void
-mapiternext (void *it)
-{
- __go_mapiternext ((struct __go_hash_iter *) it);
-}
-
-extern void *mapiterkey (void *) __asm__ (GOSYM_PREFIX "reflect.mapiterkey");
-
-void *
-mapiterkey (void *ita)
-{
- struct __go_hash_iter *it = (struct __go_hash_iter *) ita;
- const struct __go_type_descriptor *key_descriptor;
- void *key;
-
- if (it->entry == NULL)
- return NULL;
-
- key_descriptor = it->map->__descriptor->__map_descriptor->__key_type;
- key = __go_alloc (key_descriptor->__size);
- __go_mapiter1 (it, key);
- return key;
-}
-
-/* Make a new map. We have to build our own map descriptor. */
-
-extern struct __go_map *makemap (const struct __go_map_type *)
- __asm__ (GOSYM_PREFIX "reflect.makemap");
-
-struct __go_map *
-makemap (const struct __go_map_type *t)
-{
- struct __go_map_descriptor *md;
- unsigned int o;
- const struct __go_type_descriptor *kt;
- const struct __go_type_descriptor *vt;
-
- md = (struct __go_map_descriptor *) __go_alloc (sizeof (*md));
- md->__map_descriptor = t;
- o = sizeof (void *);
- kt = t->__key_type;
- o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
- md->__key_offset = o;
- o += kt->__size;
- vt = t->__val_type;
- o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
- md->__val_offset = o;
- o += vt->__size;
- o = (o + sizeof (void *) - 1) & ~ (sizeof (void *) - 1);
- o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
- o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
- md->__entry_size = o;
-
- return __go_new_map (md, 0);
-}
-
-extern _Bool ismapkey (const struct __go_type_descriptor *)
- __asm__ (GOSYM_PREFIX "reflect.ismapkey");
-
-_Bool
-ismapkey (const struct __go_type_descriptor *typ)
-{
- return (typ != NULL
- && (void *) typ->__hashfn->fn != (void *) __go_type_hash_error);
-}
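
The removed reflect.makemap laid out a map entry by hand using the usual round-up-to-alignment idiom. A small sketch of that idiom, assuming power-of-two alignments (the helper name is hypothetical):

    static uintptr_t
    align_up (uintptr_t o, uintptr_t a)   /* a must be a power of two */
    {
      return (o + a - 1) & ~(a - 1);
    }
    /* align_up (9, 8) == 16; align_up (8, 8) == 8. */
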
diff --git a/libgo/runtime/go-type-complex.c b/libgo/runtime/go-type-complex.c
index 585984e9fef..829572b7bee 100644
--- a/libgo/runtime/go-type-complex.c
+++ b/libgo/runtime/go-type-complex.c
@@ -14,7 +14,7 @@
/* Hash function for complex types. */
uintptr_t
-__go_type_hash_complex (const void *vkey, uintptr_t key_size)
+__go_type_hash_complex (const void *vkey, uintptr_t seed, uintptr_t key_size)
{
if (key_size == 8)
{
@@ -31,7 +31,7 @@ __go_type_hash_complex (const void *vkey, uintptr_t key_size)
cfi = cimagf (cf);
if (isinf (cfr) || isinf (cfi))
- return 0;
+ return seed;
/* NaN != NaN, so the hash code of a NaN is irrelevant. Make it
random so that not all NaNs wind up in the same place. */
@@ -40,14 +40,14 @@ __go_type_hash_complex (const void *vkey, uintptr_t key_size)
/* Avoid negative zero. */
if (cfr == 0 && cfi == 0)
- return 0;
+ return seed;
else if (cfr == 0)
cf = cfi * I;
else if (cfi == 0)
cf = cfr;
memcpy (&fi, &cf, 8);
- return (uintptr_t) cfi;
+ return (uintptr_t) cfi ^ seed;
}
else if (key_size == 16)
{
@@ -64,21 +64,21 @@ __go_type_hash_complex (const void *vkey, uintptr_t key_size)
cdi = cimag (cd);
if (isinf (cdr) || isinf (cdi))
- return 0;
+ return seed;
if (isnan (cdr) || isnan (cdi))
return runtime_fastrand1 ();
/* Avoid negative zero. */
if (cdr == 0 && cdi == 0)
- return 0;
+ return seed;
else if (cdr == 0)
cd = cdi * I;
else if (cdi == 0)
cd = cdr;
memcpy (&di, &cd, 16);
- return di[0] ^ di[1];
+ return di[0] ^ di[1] ^ seed;
}
else
runtime_throw ("__go_type_hash_complex: invalid complex size");
diff --git a/libgo/runtime/go-type-eface.c b/libgo/runtime/go-type-eface.c
index 315c30efb7f..a98bceaac84 100644
--- a/libgo/runtime/go-type-eface.c
+++ b/libgo/runtime/go-type-eface.c
@@ -11,7 +11,7 @@
/* A hash function for an empty interface. */
uintptr_t
-__go_type_hash_empty_interface (const void *vval,
+__go_type_hash_empty_interface (const void *vval, uintptr_t seed,
uintptr_t key_size __attribute__ ((unused)))
{
const struct __go_empty_interface *val;
@@ -22,11 +22,13 @@ __go_type_hash_empty_interface (const void *vval,
descriptor = val->__type_descriptor;
if (descriptor == NULL)
return 0;
+ if (descriptor->__hashfn == NULL)
+ runtime_panicstring ("hash of unhashable type");
size = descriptor->__size;
if (__go_is_pointer_type (descriptor))
- return __go_call_hashfn (descriptor->__hashfn, &val->__object, size);
+ return __go_call_hashfn (descriptor->__hashfn, &val->__object, seed, size);
else
- return __go_call_hashfn (descriptor->__hashfn, val->__object, size);
+ return __go_call_hashfn (descriptor->__hashfn, val->__object, seed, size);
}
const FuncVal __go_type_hash_empty_interface_descriptor =
@@ -51,6 +53,8 @@ __go_type_equal_empty_interface (const void *vv1, const void *vv2,
return v1_descriptor == v2_descriptor;
if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
return 0;
+ if (v1_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (v1_descriptor))
return v1->__object == v2->__object;
else
diff --git a/libgo/runtime/go-type-error.c b/libgo/runtime/go-type-error.c
deleted file mode 100644
index 8881a86f6e0..00000000000
--- a/libgo/runtime/go-type-error.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/* go-type-error.c -- invalid hash and equality functions.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-type.h"
-
-/* A hash function used for a type which does not support hash
- functions. */
-
-uintptr_t
-__go_type_hash_error (const void *val __attribute__ ((unused)),
- uintptr_t key_size __attribute__ ((unused)))
-{
- runtime_panicstring ("hash of unhashable type");
-}
-
-const FuncVal __go_type_hash_error_descriptor =
- { (void *) __go_type_hash_error };
-
-/* An equality function for an interface. */
-
-_Bool
-__go_type_equal_error (const void *v1 __attribute__ ((unused)),
- const void *v2 __attribute__ ((unused)),
- uintptr_t key_size __attribute__ ((unused)))
-{
- runtime_panicstring ("comparing uncomparable types");
-}
-
-const FuncVal __go_type_equal_error_descriptor =
- { (void *) __go_type_equal_error };
diff --git a/libgo/runtime/go-type-float.c b/libgo/runtime/go-type-float.c
index 39f9b29ae7d..ae0e3367c21 100644
--- a/libgo/runtime/go-type-float.c
+++ b/libgo/runtime/go-type-float.c
@@ -12,7 +12,7 @@
/* Hash function for float types. */
uintptr_t
-__go_type_hash_float (const void *vkey, uintptr_t key_size)
+__go_type_hash_float (const void *vkey, uintptr_t seed, uintptr_t key_size)
{
if (key_size == 4)
{
@@ -24,7 +24,7 @@ __go_type_hash_float (const void *vkey, uintptr_t key_size)
f = *fp;
if (isinf (f) || f == 0)
- return 0;
+ return seed;
/* NaN != NaN, so the hash code of a NaN is irrelevant. Make it
random so that not all NaNs wind up in the same place. */
@@ -32,7 +32,7 @@ __go_type_hash_float (const void *vkey, uintptr_t key_size)
return runtime_fastrand1 ();
memcpy (&si, vkey, 4);
- return (uintptr_t) si;
+ return (uintptr_t) si ^ seed;
}
else if (key_size == 8)
{
@@ -44,13 +44,13 @@ __go_type_hash_float (const void *vkey, uintptr_t key_size)
d = *dp;
if (isinf (d) || d == 0)
- return 0;
+ return seed;
if (isnan (d))
return runtime_fastrand1 ();
memcpy (&di, vkey, 8);
- return (uintptr_t) di;
+ return (uintptr_t) di ^ seed;
}
else
runtime_throw ("__go_type_hash_float: invalid float size");
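
One consequence of returning the seed for zeros, visible above, is that +0.0 and -0.0 hash identically, as they must since they compare equal in Go. A minimal sketch (`seed` is a placeholder):

    double pos = 0.0, neg = -0.0;
    uintptr_t h1 = __go_type_hash_float (&pos, seed, sizeof pos);
    uintptr_t h2 = __go_type_hash_float (&neg, seed, sizeof neg);
    /* h1 == h2 == seed */
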
diff --git a/libgo/runtime/go-type-identity.c b/libgo/runtime/go-type-identity.c
index a334d56cbe4..d58aa75e5ed 100644
--- a/libgo/runtime/go-type-identity.c
+++ b/libgo/runtime/go-type-identity.c
@@ -14,7 +14,7 @@
true of, e.g., integers and pointers. */
uintptr_t
-__go_type_hash_identity (const void *key, uintptr_t key_size)
+__go_type_hash_identity (const void *key, uintptr_t seed, uintptr_t key_size)
{
uintptr_t ret;
uintptr_t i;
@@ -34,12 +34,12 @@ __go_type_hash_identity (const void *key, uintptr_t key_size)
__builtin_memcpy (&u.a[0], key, key_size);
#endif
if (sizeof (uintptr_t) >= 8)
- return (uintptr_t) u.v;
+ return (uintptr_t) u.v ^ seed;
else
- return (uintptr_t) ((u.v >> 32) ^ (u.v & 0xffffffff));
+ return (uintptr_t) ((u.v >> 32) ^ (u.v & 0xffffffff)) ^ seed;
}
- ret = 5381;
+ ret = seed;
for (i = 0, p = (const unsigned char *) key; i < key_size; i++, p++)
ret = ret * 33 + *p;
return ret;
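
For small keys the seeded identity hash is just the key's bits xor'ed with the seed; larger keys fall back to the multiply-by-33 loop, now started from the seed instead of the old constant 5381. A minimal sketch for a 4-byte key (`seed` is a placeholder):

    int32 key = 42;
    uintptr_t h = __go_type_hash_identity (&key, seed, sizeof key);
    /* On a 64-bit target: h == (uintptr_t) 42 ^ seed. */
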
diff --git a/libgo/runtime/go-type-interface.c b/libgo/runtime/go-type-interface.c
index e9e577956eb..ffba7b28a35 100644
--- a/libgo/runtime/go-type-interface.c
+++ b/libgo/runtime/go-type-interface.c
@@ -11,7 +11,7 @@
/* A hash function for an interface. */
uintptr_t
-__go_type_hash_interface (const void *vval,
+__go_type_hash_interface (const void *vval, uintptr_t seed,
uintptr_t key_size __attribute__ ((unused)))
{
const struct __go_interface *val;
@@ -22,11 +22,13 @@ __go_type_hash_interface (const void *vval,
if (val->__methods == NULL)
return 0;
descriptor = (const struct __go_type_descriptor *) val->__methods[0];
+ if (descriptor->__hashfn == NULL)
+ runtime_panicstring ("hash of unhashable type");
size = descriptor->__size;
if (__go_is_pointer_type (descriptor))
- return __go_call_hashfn (descriptor->__hashfn, &val->__object, size);
+ return __go_call_hashfn (descriptor->__hashfn, &val->__object, seed, size);
else
- return __go_call_hashfn (descriptor->__hashfn, val->__object, size);
+ return __go_call_hashfn (descriptor->__hashfn, val->__object, seed, size);
}
const FuncVal __go_type_hash_interface_descriptor =
@@ -51,6 +53,8 @@ __go_type_equal_interface (const void *vv1, const void *vv2,
v2_descriptor = (const struct __go_type_descriptor *) v2->__methods[0];
if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
return 0;
+ if (v1_descriptor->__equalfn == NULL)
+ runtime_panicstring ("comparing uncomparable types");
if (__go_is_pointer_type (v1_descriptor))
return v1->__object == v2->__object;
else
diff --git a/libgo/runtime/go-type-string.c b/libgo/runtime/go-type-string.c
index 3d33d6ee510..c7277ddb646 100644
--- a/libgo/runtime/go-type-string.c
+++ b/libgo/runtime/go-type-string.c
@@ -11,7 +11,7 @@
/* A string hash function for a map. */
uintptr_t
-__go_type_hash_string (const void *vkey,
+__go_type_hash_string (const void *vkey, uintptr_t seed,
uintptr_t key_size __attribute__ ((unused)))
{
uintptr_t ret;
@@ -20,7 +20,7 @@ __go_type_hash_string (const void *vkey,
intgo i;
const byte *p;
- ret = 5381;
+ ret = seed;
key = (const String *) vkey;
len = key->len;
for (i = 0, p = key->str; i < len; i++, p++)
diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h
index eb063ec6789..7c3149badc7 100644
--- a/libgo/runtime/go-type.h
+++ b/libgo/runtime/go-type.h
@@ -257,6 +257,33 @@ struct __go_map_type
/* The map value type. */
const struct __go_type_descriptor *__val_type;
+
+ /* The map bucket type. */
+ const struct __go_type_descriptor *__bucket_type;
+
+ /* The map header type. */
+ const struct __go_type_descriptor *__hmap_type;
+
+ /* The size of the key slot. */
+ uint8_t __key_size;
+
+ /* Whether to store a pointer to key rather than the key itself. */
+ uint8_t __indirect_key;
+
+ /* The size of the value slot. */
+ uint8_t __value_size;
+
+ /* Whether to store a pointer to value rather than the value itself. */
+ uint8_t __indirect_value;
+
+ /* The size of a bucket. */
+ uint16_t __bucket_size;
+
+ /* Whether the key type is reflexive--whether k==k for all keys. */
+ _Bool __reflexive_key;
+
+ /* Whether we should update the key when overwriting an entry. */
+ _Bool __need_key_update;
};
/* A pointer type. */
@@ -314,10 +341,11 @@ __go_is_pointer_type (const struct __go_type_descriptor *td)
/* Call a type hash function, given the __hashfn value. */
static inline uintptr_t
-__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t size)
+__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t seed,
+ uintptr_t size)
{
- uintptr_t (*h) (const void *, uintptr_t) = (void *) hashfn->fn;
- return __builtin_call_with_static_chain (h (p, size), hashfn);
+ uintptr_t (*h) (const void *, uintptr_t, uintptr_t) = (void *) hashfn->fn;
+ return __builtin_call_with_static_chain (h (p, seed, size), hashfn);
}
/* Call a type equality function, given the __equalfn value. */
@@ -334,29 +362,25 @@ extern _Bool
__go_type_descriptors_equal(const struct __go_type_descriptor*,
const struct __go_type_descriptor*);
-extern uintptr_t __go_type_hash_identity (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_identity (const void *, uintptr_t, uintptr_t);
extern const FuncVal __go_type_hash_identity_descriptor;
extern _Bool __go_type_equal_identity (const void *, const void *, uintptr_t);
extern const FuncVal __go_type_equal_identity_descriptor;
-extern uintptr_t __go_type_hash_string (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_string (const void *, uintptr_t, uintptr_t);
extern const FuncVal __go_type_hash_string_descriptor;
extern _Bool __go_type_equal_string (const void *, const void *, uintptr_t);
extern const FuncVal __go_type_equal_string_descriptor;
-extern uintptr_t __go_type_hash_float (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_float (const void *, uintptr_t, uintptr_t);
extern const FuncVal __go_type_hash_float_descriptor;
extern _Bool __go_type_equal_float (const void *, const void *, uintptr_t);
extern const FuncVal __go_type_equal_float_descriptor;
-extern uintptr_t __go_type_hash_complex (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_complex (const void *, uintptr_t, uintptr_t);
extern const FuncVal __go_type_hash_complex_descriptor;
extern _Bool __go_type_equal_complex (const void *, const void *, uintptr_t);
extern const FuncVal __go_type_equal_complex_descriptor;
-extern uintptr_t __go_type_hash_interface (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_interface (const void *, uintptr_t, uintptr_t);
extern const FuncVal __go_type_hash_interface_descriptor;
extern _Bool __go_type_equal_interface (const void *, const void *, uintptr_t);
extern const FuncVal __go_type_equal_interface_descriptor;
-extern uintptr_t __go_type_hash_error (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_error_descriptor;
-extern _Bool __go_type_equal_error (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_error_descriptor;
#endif /* !defined(LIBGO_GO_TYPE_H) */
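
With the seed threaded through __go_call_hashfn, and with uncomparable types now storing a null __hashfn, a caller hashes a value through its type descriptor roughly as follows (a sketch; `td`, `key` and `seed` are placeholders):

    uintptr_t h;

    if (td->__hashfn == NULL)
      runtime_panicstring ("hash of unhashable type");
    h = __go_call_hashfn (td->__hashfn, key, seed, td->__size);
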
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index fbb7b744eeb..591d06a7f59 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -23,9 +23,6 @@ package runtime
// Type aka __go_type_descriptor
#define kind __code
#define string __reflection
-#define KindPtr GO_PTR
-#define KindNoPointers GO_NO_POINTERS
-#define kindMask GO_CODE_MASK
// GCCGO SPECIFIC CHANGE
//
@@ -893,7 +890,7 @@ runtime_mal(uintptr n)
}
func new(typ *Type) (ret *uint8) {
- ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
+ ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&kindNoPointers ? FlagNoScan : 0);
}
static void*
@@ -903,7 +900,7 @@ cnew(const Type *typ, intgo n, int32 objtyp)
runtime_throw("runtime: invalid objtyp");
if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
runtime_panicstring("runtime: allocation size out of range");
- return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
+ return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&kindNoPointers ? FlagNoScan : 0);
}
// same as runtime_new, but callable from C
@@ -955,7 +952,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
- if(ot->__element_type == nil || (ot->__element_type->kind&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
+ if(ot->__element_type == nil || (ot->__element_type->kind&kindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object);
goto throw;
}
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index acd919f7abe..1efbbbeb102 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -391,7 +391,7 @@ struct MCentral
Lock;
int32 sizeclass;
MSpan nonempty; // list of spans with a free object
- MSpan empty; // list of spans with no free objects (or cached in an MCache)
+ MSpan mempty; // list of spans with no free objects (or cached in an MCache)
int32 nfree; // # of objects available in nonempty spans
};
@@ -478,8 +478,10 @@ extern int32 runtime_checking;
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
void runtime_purgecachedstats(MCache*);
-void* runtime_cnew(const Type*);
-void* runtime_cnewarray(const Type*, intgo);
+void* runtime_cnew(const Type*)
+ __asm__(GOSYM_PREFIX "runtime.newobject");
+void* runtime_cnewarray(const Type*, intgo)
+ __asm__(GOSYM_PREFIX "runtime.newarray");
void runtime_tracealloc(void*, uintptr, uintptr);
void runtime_tracefree(void*, uintptr);
void runtime_tracegc(void);
diff --git a/libgo/runtime/map.goc b/libgo/runtime/map.goc
deleted file mode 100644
index e4b8456dc36..00000000000
--- a/libgo/runtime/map.goc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "map.h"
-
-typedef struct __go_map Hmap;
-typedef struct __go_hash_iter hiter;
-
-/* Access a value in a map, returning a value and a presence indicator. */
-
-func mapaccess2(t *MapType, h *Hmap, key *byte, val *byte) (present bool) {
- byte *mapval;
- size_t valsize;
-
- mapval = __go_map_index(h, key, 0);
- valsize = t->__val_type->__size;
- if (mapval == nil) {
- __builtin_memset(val, 0, valsize);
- present = 0;
- } else {
- __builtin_memcpy(val, mapval, valsize);
- present = 1;
- }
-}
-
-/* Optionally assign a value to a map (m[k] = v, p). */
-
-func mapassign2(h *Hmap, key *byte, val *byte, p bool) {
- if (!p) {
- __go_map_delete(h, key);
- } else {
- byte *mapval;
- size_t valsize;
-
- mapval = __go_map_index(h, key, 1);
- valsize = h->__descriptor->__map_descriptor->__val_type->__size;
- __builtin_memcpy(mapval, val, valsize);
- }
-}
-
-/* Delete a key from a map. */
-
-func mapdelete(h *Hmap, key *byte) {
- __go_map_delete(h, key);
-}
-
-/* Initialize a range over a map. */
-
-func mapiterinit(h *Hmap, it *hiter) {
- __go_mapiterinit(h, it);
-}
-
-/* Move to the next iteration, updating *HITER. */
-
-func mapiternext(it *hiter) {
- __go_mapiternext(it);
-}
-
-/* Get the key of the current iteration. */
-
-func mapiter1(it *hiter, key *byte) {
- __go_mapiter1(it, key);
-}
-
-/* Get the key and value of the current iteration. */
-
-func mapiter2(it *hiter, key *byte, val *byte) {
- __go_mapiter2(it, key, val);
-}
diff --git a/libgo/runtime/map.h b/libgo/runtime/map.h
deleted file mode 100644
index 0c587bb2afa..00000000000
--- a/libgo/runtime/map.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* map.h -- the map type for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "go-type.h"
-
-/* A map descriptor is what we need to manipulate the map. This is
- constant for a given map type. */
-
-struct __go_map_descriptor
-{
- /* A pointer to the type descriptor for the type of the map itself. */
- const struct __go_map_type *__map_descriptor;
-
- /* A map entry is a struct with three fields:
- map_entry_type *next_entry;
- key_type key;
- value_type value;
- This is the size of that struct. */
- uintptr_t __entry_size;
-
- /* The offset of the key field in a map entry struct. */
- uintptr_t __key_offset;
-
- /* The offset of the value field in a map entry struct (the value
- field immediately follows the key field, but there may be some
- bytes inserted for alignment). */
- uintptr_t __val_offset;
-};
-
-struct __go_map
-{
- /* The constant descriptor for this map. */
- const struct __go_map_descriptor *__descriptor;
-
- /* The number of elements in the hash table. */
- uintptr_t __element_count;
-
- /* The number of entries in the __buckets array. */
- uintptr_t __bucket_count;
-
- /* Each bucket is a pointer to a linked list of map entries. */
- void **__buckets;
-};
-
-/* For a map iteration the compiled code will use a pointer to an
- iteration structure. The iteration structure will be allocated on
- the stack. The Go code must allocate at least enough space. */
-
-struct __go_hash_iter
-{
- /* A pointer to the current entry. This will be set to NULL when
-     the range has completed. The Go code will test this field, so it must
- be the first one in the structure. */
- const void *entry;
- /* The map we are iterating over. */
- const struct __go_map *map;
- /* A pointer to the next entry in the current bucket. This permits
- deleting the current entry. This will be NULL when we have seen
- all the entries in the current bucket. */
- const void *next_entry;
- /* The bucket index of the current and next entry. */
- uintptr_t bucket;
-};
-
-extern struct __go_map *__go_new_map (const struct __go_map_descriptor *,
- uintptr_t);
-
-extern uintptr_t __go_map_next_prime (uintptr_t);
-
-extern void *__go_map_index (struct __go_map *, const void *, _Bool);
-
-extern void __go_map_delete (struct __go_map *, const void *);
-
-extern void __go_mapiterinit (const struct __go_map *, struct __go_hash_iter *);
-
-extern void __go_mapiternext (struct __go_hash_iter *);
-
-extern void __go_mapiter1 (struct __go_hash_iter *it, unsigned char *key);
-
-extern void __go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
- unsigned char *val);
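
To make the removed descriptor fields concrete: for a map with 8-byte keys and 8-byte values on a 64-bit target, each chained entry described by __go_map_descriptor would have worked out as below (illustrative only; the struct name is hypothetical):

    struct old_map_entry
    {
      struct old_map_entry *next_entry;   /* bucket chain link   */
      int64 key;                          /* __key_offset  == 8  */
      int64 value;                        /* __val_offset  == 16 */
    };                                    /* __entry_size  == 24 */
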
diff --git a/libgo/runtime/mcentral.c b/libgo/runtime/mcentral.c
index 62e2c2d7dfb..491cac5330f 100644
--- a/libgo/runtime/mcentral.c
+++ b/libgo/runtime/mcentral.c
@@ -8,7 +8,7 @@
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
-// and those that are completely allocated (c->empty).
+// and those that are completely allocated (c->mempty).
//
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
// into sections of class_to_transfercount[sizeclass] objects
@@ -28,7 +28,7 @@ runtime_MCentral_Init(MCentral *c, int32 sizeclass)
{
c->sizeclass = sizeclass;
runtime_MSpanList_Init(&c->nonempty);
- runtime_MSpanList_Init(&c->empty);
+ runtime_MSpanList_Init(&c->mempty);
}
// Allocate a span to use in an MCache.
@@ -58,13 +58,13 @@ retry:
goto havespan;
}
- for(s = c->empty.next; s != &c->empty; s = s->next) {
+ for(s = c->mempty.next; s != &c->mempty; s = s->next) {
if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
// we have an empty span that requires sweeping,
// sweep it and see if we can free some space in it
runtime_MSpanList_Remove(s);
// swept spans are at the end of the list
- runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_MSpanList_InsertBack(&c->mempty, s);
runtime_unlock(c);
runtime_MSpan_Sweep(s);
runtime_lock(c);
@@ -96,7 +96,7 @@ havespan:
runtime_throw("freelist empty");
c->nfree -= n;
runtime_MSpanList_Remove(s);
- runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_MSpanList_InsertBack(&c->mempty, s);
s->incache = true;
runtime_unlock(c);
return s;
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 1f6a40cd630..341544cb970 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -69,9 +69,6 @@
typedef struct __go_map Hmap;
// Type aka __go_type_descriptor
#define string __reflection
-#define KindPtr GO_PTR
-#define KindNoPointers GO_NO_POINTERS
-#define kindMask GO_CODE_MASK
// PtrType aka __go_ptr_type
#define elem __element_type
@@ -216,7 +213,7 @@ static void addstackroots(G *gp, Workbuf **wbufp);
static struct {
uint64 full; // lock-free list of full blocks
- uint64 empty; // lock-free list of empty blocks
+ uint64 wempty; // lock-free list of empty blocks
byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
uint32 nproc;
int64 tstart;
@@ -943,16 +940,16 @@ scanblock(Workbuf *wbuf, bool keepworking)
// eface->__object
if((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used) {
if(__go_is_pointer_type(t)) {
- if((t->__code & KindNoPointers))
+ if((t->__code & kindNoPointers))
continue;
obj = eface->__object;
- if((t->__code & kindMask) == KindPtr) {
+ if((t->__code & kindMask) == kindPtr) {
// Only use type information if it is a pointer-containing type.
// This matches the GC programs written by cmd/gc/reflect.c's
// dgcsym1 in case TPTR32/case TPTR64. See rationale there.
et = ((const PtrType*)t)->elem;
- if(!(et->__code & KindNoPointers))
+ if(!(et->__code & kindNoPointers))
objti = (uintptr)((const PtrType*)t)->elem->__gc;
}
} else {
@@ -981,16 +978,16 @@ scanblock(Workbuf *wbuf, bool keepworking)
if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
t = (const Type*)iface->tab[0];
if(__go_is_pointer_type(t)) {
- if((t->__code & KindNoPointers))
+ if((t->__code & kindNoPointers))
continue;
obj = iface->__object;
- if((t->__code & kindMask) == KindPtr) {
+ if((t->__code & kindMask) == kindPtr) {
// Only use type information if it is a pointer-containing type.
// This matches the GC programs written by cmd/gc/reflect.c's
// dgcsym1 in case TPTR32/case TPTR64. See rationale there.
et = ((const PtrType*)t)->elem;
- if(!(et->__code & KindNoPointers))
+ if(!(et->__code & kindNoPointers))
objti = (uintptr)((const PtrType*)t)->elem->__gc;
}
} else {
@@ -1101,7 +1098,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
}
if(markonly(chan)) {
chantype = (ChanType*)pc[2];
- if(!(chantype->elem->__code & KindNoPointers)) {
+ if(!(chantype->elem->__code & kindNoPointers)) {
// Start chanProg.
chan_ret = pc+3;
pc = chanProg+1;
@@ -1114,7 +1111,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_CHAN:
// There are no heap pointers in struct Hchan,
// so we can ignore the leading sizeof(Hchan) bytes.
- if(!(chantype->elem->__code & KindNoPointers)) {
+ if(!(chantype->elem->__code & kindNoPointers)) {
// Channel's buffer follows Hchan immediately in memory.
// Size of buffer (cap(c)) is second int in the chan struct.
chancap = ((uintgo*)chan)[1];
@@ -1377,7 +1374,7 @@ getempty(Workbuf *b)
{
if(b != nil)
runtime_lfstackpush(&work.full, &b->node);
- b = (Workbuf*)runtime_lfstackpop(&work.empty);
+ b = (Workbuf*)runtime_lfstackpop(&work.wempty);
if(b == nil) {
// Need to allocate.
runtime_lock(&work);
@@ -1402,7 +1399,7 @@ putempty(Workbuf *b)
if(CollectStats)
runtime_xadd64(&gcstats.putempty, 1);
- runtime_lfstackpush(&work.empty, &b->node);
+ runtime_lfstackpush(&work.wempty, &b->node);
}
// Get a full work buffer off the work.full list, or return nil.
@@ -1416,7 +1413,7 @@ getfull(Workbuf *b)
runtime_xadd64(&gcstats.getfull, 1);
if(b != nil)
- runtime_lfstackpush(&work.empty, &b->node);
+ runtime_lfstackpush(&work.wempty, &b->node);
b = (Workbuf*)runtime_lfstackpop(&work.full);
if(b != nil || work.nproc == 1)
return b;
@@ -2129,7 +2126,7 @@ runtime_gc(int32 force)
// The atomic operations are not atomic if the uint64s
// are not aligned on uint64 boundaries. This has been
// a problem in the past.
- if((((uintptr)&work.empty) & 7) != 0)
+ if((((uintptr)&work.wempty) & 7) != 0)
runtime_throw("runtime: gc work buffer is misaligned");
if((((uintptr)&work.full) & 7) != 0)
runtime_throw("runtime: gc work buffer is misaligned");
@@ -2522,7 +2519,7 @@ runfinq(void* dummy __attribute__ ((unused)))
f = &fb->fin[i];
fint = ((const Type**)f->ft->__in.array)[0];
- if((fint->__code & kindMask) == KindPtr) {
+ if((fint->__code & kindMask) == kindPtr) {
// direct use of pointer
param = &f->arg;
} else if(((const InterfaceType*)fint)->__methods.__count == 0) {
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 04dc971d688..04a5b98772c 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -878,7 +878,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
// remove the span from whatever list it is in now
if(s->sizeclass > 0) {
- // must be in h->central[x].empty
+ // must be in h->central[x].mempty
c = &h->central[s->sizeclass];
runtime_lock(c);
runtime_MSpanList_Remove(s);
@@ -937,7 +937,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
c = &h->central[s->sizeclass];
runtime_lock(c);
// swept spans are at the end of the list
- runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_MSpanList_InsertBack(&c->mempty, s);
runtime_unlock(c);
} else {
// Swept spans are at the end of lists.
diff --git a/libgo/runtime/panic.c b/libgo/runtime/panic.c
index 3fb3bde3223..d493b54a509 100644
--- a/libgo/runtime/panic.c
+++ b/libgo/runtime/panic.c
@@ -194,6 +194,22 @@ runtime_throw(const char *s)
runtime_exit(1); // even more not reached
}
+void throw(String) __asm__ (GOSYM_PREFIX "runtime.throw");
+void
+throw(String s)
+{
+ M *mp;
+
+ mp = runtime_m();
+ if(mp->throwing == 0)
+ mp->throwing = 1;
+ runtime_startpanic();
+ runtime_printf("fatal error: %S\n", s);
+ runtime_dopanic(0);
+ *(int32*)0 = 0; // not reached
+ runtime_exit(1); // even more not reached
+}
+
void
runtime_panicstring(const char *s)
{
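
The new Go-callable throw sits alongside the existing runtime_throw, which keeps taking a C string. For illustration only, C code could reach it by building a String by hand (field names follow the String usage elsewhere in this patch; runtime_throw remains the normal entry point from C):

    String msg;

    msg.str = (const byte *) "map bucket overflow";
    msg.len = 19;   /* length of the literal above */
    throw (msg);
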
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 20db789ddb6..32d0fb2a7be 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -546,9 +546,9 @@ static struct __go_channel_type chan_bool_type_descriptor =
/* __hash */
0, /* This value doesn't matter. */
/* __hashfn */
- &__go_type_hash_error_descriptor,
+ NULL,
/* __equalfn */
- &__go_type_equal_error_descriptor,
+ NULL,
/* __gc */
NULL, /* This value doesn't matter */
/* __reflection */
@@ -2753,7 +2753,7 @@ static void
procresize(int32 new)
{
int32 i, old;
- bool empty;
+ bool pempty;
G *gp;
P *p;
@@ -2781,14 +2781,14 @@ procresize(int32 new)
// collect all runnable goroutines in global queue preserving FIFO order
// FIFO order is required to ensure fairness even during frequent GCs
// see http://golang.org/issue/7126
- empty = false;
- while(!empty) {
- empty = true;
+ pempty = false;
+ while(!pempty) {
+ pempty = true;
for(i = 0; i < old; i++) {
p = runtime_allp[i];
if(p->runqhead == p->runqtail)
continue;
- empty = false;
+ pempty = false;
// pop from tail of local queue
p->runqtail--;
gp = (G*)p->runq[p->runqtail%nelem(p->runq)];
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 617766b8a99..dc00b421f99 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -376,7 +376,7 @@ void runtime_mprofinit(void);
int32 runtime_mcount(void);
int32 runtime_gcount(void);
void runtime_mcall(void(*)(G*));
-uint32 runtime_fastrand1(void);
+uint32 runtime_fastrand1(void) __asm__ (GOSYM_PREFIX "runtime.fastrand1");
int32 runtime_timediv(int64, int32, int32*);
int32 runtime_round2(int32 x); // round x up to a power of 2.