summary refs log tree commitdiff
path: root/src/reflect
diff options
context:
space:
mode:
Diffstat (limited to 'src/reflect')
-rw-r--r--src/reflect/all_test.go25
-rw-r--r--src/reflect/asm_386.s3
-rw-r--r--src/reflect/asm_amd64.s7
-rw-r--r--src/reflect/asm_amd64p32.s3
-rw-r--r--src/reflect/asm_arm.s3
-rw-r--r--src/reflect/makefunc.go17
-rw-r--r--src/reflect/type.go82
-rw-r--r--src/reflect/value.go31
8 files changed, 130 insertions, 41 deletions
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 9a2a9f266..688b5d310 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -3860,3 +3860,28 @@ func TestCallMethodJump(t *testing.T) {
// Stop garbage collecting during reflect.call.
*CallGC = false
}
+
+func TestMakeFuncStackCopy(t *testing.T) {
+ target := func(in []Value) []Value {
+ runtime.GC()
+ useStack(16)
+ return []Value{ValueOf(9)}
+ }
+
+ var concrete func(*int, int) int
+ fn := MakeFunc(ValueOf(concrete).Type(), target)
+ ValueOf(&concrete).Elem().Set(fn)
+ x := concrete(nil, 7)
+ if x != 9 {
+ t.Errorf("have %#q want 9", x)
+ }
+}
+
+// use about n KB of stack
+func useStack(n int) {
+ if n == 0 {
+ return
+ }
+ var b [1024]byte // makes frame about 1KB
+ useStack(n - 1 + int(b[99]))
+}
diff --git a/src/reflect/asm_386.s b/src/reflect/asm_386.s
index c028113a0..0ffccf7d4 100644
--- a/src/reflect/asm_386.s
+++ b/src/reflect/asm_386.s
@@ -3,12 +3,14 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
+#include "funcdata.h"
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8
+ NO_LOCAL_POINTERS
MOVL DX, 0(SP)
LEAL argframe+0(FP), CX
MOVL CX, 4(SP)
@@ -20,6 +22,7 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8
+ NO_LOCAL_POINTERS
MOVL DX, 0(SP)
LEAL argframe+0(FP), CX
MOVL CX, 4(SP)
diff --git a/src/reflect/asm_amd64.s b/src/reflect/asm_amd64.s
index b3c54f048..5a6c27ac9 100644
--- a/src/reflect/asm_amd64.s
+++ b/src/reflect/asm_amd64.s
@@ -3,12 +3,14 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
+#include "funcdata.h"
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
-// No argsize here, gc generates argsize info at call site.
+// No arg size here; runtime pulls arg map out of the func value.
TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
MOVQ DX, 0(SP)
LEAQ argframe+0(FP), CX
MOVQ CX, 8(SP)
@@ -18,8 +20,9 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
-// No argsize here, gc generates argsize info at call site.
+// No arg size here; runtime pulls arg map out of the func value.
TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
MOVQ DX, 0(SP)
LEAQ argframe+0(FP), CX
MOVQ CX, 8(SP)
diff --git a/src/reflect/asm_amd64p32.s b/src/reflect/asm_amd64p32.s
index c028113a0..0ffccf7d4 100644
--- a/src/reflect/asm_amd64p32.s
+++ b/src/reflect/asm_amd64p32.s
@@ -3,12 +3,14 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
+#include "funcdata.h"
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8
+ NO_LOCAL_POINTERS
MOVL DX, 0(SP)
LEAL argframe+0(FP), CX
MOVL CX, 4(SP)
@@ -20,6 +22,7 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8
+ NO_LOCAL_POINTERS
MOVL DX, 0(SP)
LEAL argframe+0(FP), CX
MOVL CX, 4(SP)
diff --git a/src/reflect/asm_arm.s b/src/reflect/asm_arm.s
index 6bd5d48ec..5a14c6f81 100644
--- a/src/reflect/asm_arm.s
+++ b/src/reflect/asm_arm.s
@@ -3,12 +3,14 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
+#include "funcdata.h"
// makeFuncStub is jumped to by the code generated by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8
+ NO_LOCAL_POINTERS
MOVW R7, 4(R13)
MOVW $argframe+0(FP), R1
MOVW R1, 8(R13)
@@ -20,6 +22,7 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8
// for more details.
// No argsize here, gc generates argsize info at call site.
TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8
+ NO_LOCAL_POINTERS
MOVW R7, 4(R13)
MOVW $argframe+0(FP), R1
MOVW R1, 8(R13)
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index 0e61fdea7..bdb8c21d7 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -13,9 +13,10 @@ import (
// makeFuncImpl is the closure value implementing the function
// returned by MakeFunc.
type makeFuncImpl struct {
- code uintptr
- typ *funcType
- fn func([]Value) []Value
+ code uintptr
+ stack *bitVector // stack bitmap for args - offset known to runtime
+ typ *funcType
+ fn func([]Value) []Value
}
// MakeFunc returns a new function of the given Type
@@ -54,7 +55,10 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
dummy := makeFuncStub
code := **(**uintptr)(unsafe.Pointer(&dummy))
- impl := &makeFuncImpl{code: code, typ: ftyp, fn: fn}
+ // makeFuncImpl contains a stack map for use by the runtime
+ _, _, _, stack := funcLayout(t, nil)
+
+ impl := &makeFuncImpl{code: code, stack: stack, typ: ftyp, fn: fn}
return Value{t, unsafe.Pointer(impl), 0, flag(Func) << flagKindShift}
}
@@ -68,6 +72,7 @@ func makeFuncStub()
type methodValue struct {
fn uintptr
+ stack *bitVector // stack bitmap for args - offset known to runtime
method int
rcvr Value
}
@@ -98,8 +103,12 @@ func makeMethodValue(op string, v Value) Value {
dummy := methodValueCall
code := **(**uintptr)(unsafe.Pointer(&dummy))
+ // methodValue contains a stack map for use by the runtime
+ _, _, _, stack := funcLayout(funcType, nil)
+
fv := &methodValue{
fn: code,
+ stack: stack,
method: int(v.flag) >> flagMethodShift,
rcvr: rcvr,
}
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 6817cd74d..67818f7f4 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -242,7 +242,7 @@ const (
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
type rtype struct {
- size uintptr // size in bytes
+ size uintptr
hash uint32 // hash of type; avoids computation in hash tables
_ uint8 // unused/padding
align uint8 // alignment of variable with this type
@@ -1726,6 +1726,7 @@ type layoutType struct {
t *rtype
argSize uintptr // size of arguments
retOffset uintptr // offset of return values.
+ stack *bitVector
}
var layoutCache struct {
@@ -1739,7 +1740,7 @@ var layoutCache struct {
// The returned type exists only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in
// the name for possible debugging use.
-func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr) {
+func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stack *bitVector) {
if t.Kind() != Func {
panic("reflect: funcLayout of non-func type")
}
@@ -1750,19 +1751,21 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin
layoutCache.RLock()
if x := layoutCache.m[k]; x.t != nil {
layoutCache.RUnlock()
- return x.t, x.argSize, x.retOffset
+ return x.t, x.argSize, x.retOffset, x.stack
}
layoutCache.RUnlock()
layoutCache.Lock()
if x := layoutCache.m[k]; x.t != nil {
layoutCache.Unlock()
- return x.t, x.argSize, x.retOffset
+ return x.t, x.argSize, x.retOffset, x.stack
}
tt := (*funcType)(unsafe.Pointer(t))
- // compute gc program for arguments
+ // compute gc program & stack bitmap for arguments
+ stack = new(bitVector)
var gc gcProg
+ var offset uintptr
if rcvr != nil {
// Reflect uses the "interface" calling convention for
// methods, where receivers take one word of argument
@@ -1770,16 +1773,21 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin
if !isDirectIface(rcvr) {
// we pass a pointer to the receiver.
gc.append(bitsPointer)
+ stack.append2(bitsPointer)
} else if rcvr.pointers() {
// rcvr is a one-word pointer object. Its gc program
// is just what we need here.
gc.append(bitsPointer)
+ stack.append2(bitsPointer)
} else {
gc.append(bitsScalar)
+ stack.append2(bitsScalar)
}
+ offset += ptrSize
}
for _, arg := range tt.in {
gc.appendProg(arg)
+ addTypeBits(stack, &offset, arg)
}
argSize = gc.size
if runtime.GOARCH == "amd64p32" {
@@ -1789,6 +1797,7 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin
retOffset = gc.size
for _, res := range tt.out {
gc.appendProg(res)
+ // stack map does not need result bits
}
gc.align(ptrSize)
@@ -1813,12 +1822,73 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin
t: x,
argSize: argSize,
retOffset: retOffset,
+ stack: stack,
}
layoutCache.Unlock()
- return x, argSize, retOffset
+ return x, argSize, retOffset, stack
}
// isDirectIface reports whether t is stored directly in an interface value.
func isDirectIface(t *rtype) bool {
return t.kind&kindDirectIface != 0
}
+
+// Layout matches runtime.BitVector (well enough).
+type bitVector struct {
+ n uint32 // number of bits
+ data []byte
+}
+
+// append a bit pair to the bitmap.
+func (bv *bitVector) append2(bits uint8) {
+ // assume bv.n is a multiple of 2, since append2 is the only operation.
+ if bv.n%8 == 0 {
+ bv.data = append(bv.data, 0)
+ }
+ bv.data[bv.n/8] |= bits << (bv.n % 8)
+ bv.n += 2
+}
+
+func addTypeBits(bv *bitVector, offset *uintptr, t *rtype) {
+ *offset = align(*offset, uintptr(t.align))
+ if t.kind&kindNoPointers != 0 {
+ *offset += t.size
+ return
+ }
+
+ switch Kind(t.kind & kindMask) {
+ case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
+ // 1 pointer at start of representation
+ for bv.n < uint32(*offset/uintptr(ptrSize)) {
+ bv.append2(bitsScalar)
+ }
+ bv.append2(bitsPointer)
+
+ case Interface:
+ // 2 pointers
+ for bv.n < uint32(*offset/uintptr(ptrSize)) {
+ bv.append2(bitsScalar)
+ }
+ bv.append2(bitsPointer)
+ bv.append2(bitsPointer)
+
+ case Array:
+ // repeat inner type
+ tt := (*arrayType)(unsafe.Pointer(t))
+ for i := 0; i < int(tt.len); i++ {
+ addTypeBits(bv, offset, tt.elem)
+ }
+
+ case Struct:
+ // apply fields
+ tt := (*structType)(unsafe.Pointer(t))
+ start := *offset
+ for i := range tt.fields {
+ f := &tt.fields[i]
+ off := start + f.offset
+ addTypeBits(bv, &off, f.typ)
+ }
+ }
+
+ *offset += t.size
+}
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 20d0e92ed..b0dfe840b 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -403,11 +403,6 @@ func (v Value) CallSlice(in []Value) []Value {
var callGC bool // for testing; see TestCallMethodJump
-var makeFuncStubFn = makeFuncStub
-var makeFuncStubCode = **(**uintptr)(unsafe.Pointer(&makeFuncStubFn))
-var methodValueCallFn = methodValueCall
-var methodValueCallCode = **(**uintptr)(unsafe.Pointer(&methodValueCallFn))
-
func (v Value) call(op string, in []Value) []Value {
// Get function pointer, type.
t := v.typ
@@ -486,30 +481,8 @@ func (v Value) call(op string, in []Value) []Value {
}
nout := t.NumOut()
- // If target is makeFuncStub, short circuit the unpack onto stack /
- // pack back into []Value for the args and return values. Just do the
- // call directly.
- // We need to do this here because otherwise we have a situation where
- // reflect.callXX calls makeFuncStub, neither of which knows the
- // layout of the args. That's bad for precise gc & stack copying.
- x := (*makeFuncImpl)(fn)
- if x.code == makeFuncStubCode {
- return x.fn(in)
- }
-
- // If the target is methodValueCall, do its work here: add the receiver
- // argument and call the real target directly.
- // We need to do this here because otherwise we have a situation where
- // reflect.callXX calls methodValueCall, neither of which knows the
- // layout of the args. That's bad for precise gc & stack copying.
- y := (*methodValue)(fn)
- if y.fn == methodValueCallCode {
- rcvr = y.rcvr
- rcvrtype, t, fn = methodReceiver("call", rcvr, y.method)
- }
-
// Compute frame type, allocate a chunk of memory for frame
- frametype, _, retOffset := funcLayout(t, rcvrtype)
+ frametype, _, retOffset, _ := funcLayout(t, rcvrtype)
args := unsafe_New(frametype)
off := uintptr(0)
@@ -725,7 +698,7 @@ func align(x, n uintptr) uintptr {
func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
rcvr := ctxt.rcvr
rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method)
- frametype, argSize, retOffset := funcLayout(t, rcvrtype)
+ frametype, argSize, retOffset, _ := funcLayout(t, rcvrtype)
// Make a new frame that is one word bigger so we can store the receiver.
args := unsafe_New(frametype)