author    Ian Lance Taylor <iant@golang.org>  2020-12-08 10:57:05 -0800
committer Ian Lance Taylor <iant@golang.org>  2020-12-08 14:01:04 -0800
commit    5ea350d1d7edf8afaae9e6723cda535c9eaa7562
tree      715c0290f1c8179bac2d58bf9f5cc0fde2aec2db /libgo
parent    570c312c03e151477505c8b70b25411e52751ff4
libgo: update to 1.15.6 release
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/276153
Diffstat (limited to 'libgo')
-rw-r--r--  libgo/MERGE                                              2
-rw-r--r--  libgo/VERSION                                            2
-rw-r--r--  libgo/go/cmd/go/internal/work/exec.go                   15
-rw-r--r--  libgo/go/internal/poll/copy_file_range_linux.go         55
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go                      13
-rw-r--r--  libgo/go/runtime/os_js.go                                2
-rw-r--r--  libgo/go/runtime/proc.go                                26
-rw-r--r--  libgo/go/runtime/signal_unix.go                          8
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/needmdeadlock.go  95
-rw-r--r--  libgo/runtime/proc.c                                     3
10 files changed, 201 insertions, 20 deletions
diff --git a/libgo/MERGE b/libgo/MERGE
index b753907837d..e95c59a132d 100644
--- a/libgo/MERGE
+++ b/libgo/MERGE
@@ -1,4 +1,4 @@
-c53315d6cf1b4bfea6ff356b4a1524778c683bb9
+9b955d2d3fcff6a5bc8bce7bafdc4c634a28e95b
The first line of this file holds the git revision number of the
last merge done from the master library sources.
diff --git a/libgo/VERSION b/libgo/VERSION
index 701454707cd..7b6d7469626 100644
--- a/libgo/VERSION
+++ b/libgo/VERSION
@@ -1 +1 @@
-go1.15.5
+go1.15.6
diff --git a/libgo/go/cmd/go/internal/work/exec.go b/libgo/go/cmd/go/internal/work/exec.go
index 4f689438d1d..3898b2047c3 100644
--- a/libgo/go/cmd/go/internal/work/exec.go
+++ b/libgo/go/cmd/go/internal/work/exec.go
@@ -2778,6 +2778,21 @@ func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgo
idx = bytes.Index(src, []byte(cgoLdflag))
}
}
+
+ // We expect to find the contents of cgoLDFLAGS in flags.
+ if len(cgoLDFLAGS) > 0 {
+ outer:
+ for i := range flags {
+ for j, f := range cgoLDFLAGS {
+ if f != flags[i+j] {
+ continue outer
+ }
+ }
+ flags = append(flags[:i], flags[i+len(cgoLDFLAGS):]...)
+ break
+ }
+ }
+
if err := checkLinkerFlags("LDFLAGS", "go:cgo_ldflag", flags); err != nil {
return nil, nil, err
}
diff --git a/libgo/go/internal/poll/copy_file_range_linux.go b/libgo/go/internal/poll/copy_file_range_linux.go
index 09de299ff71..fc34aef4cba 100644
--- a/libgo/go/internal/poll/copy_file_range_linux.go
+++ b/libgo/go/internal/poll/copy_file_range_linux.go
@@ -10,15 +10,61 @@ import (
"syscall"
)
-var copyFileRangeSupported int32 = 1 // accessed atomically
+var copyFileRangeSupported int32 = -1 // accessed atomically
const maxCopyFileRangeRound = 1 << 30
+func kernelVersion() (major int, minor int) {
+ var uname syscall.Utsname
+ if err := syscall.Uname(&uname); err != nil {
+ return
+ }
+
+ rl := uname.Release
+ var values [2]int
+ vi := 0
+ value := 0
+ for _, c := range rl {
+ if '0' <= c && c <= '9' {
+ value = (value * 10) + int(c-'0')
+ } else {
+ // Note that we're assuming N.N.N here. If we see anything else we are likely to
+ // mis-parse it.
+ values[vi] = value
+ vi++
+ if vi >= len(values) {
+ break
+ }
+ value = 0
+ }
+ }
+ switch vi {
+ case 0:
+ return 0, 0
+ case 1:
+ return values[0], 0
+ case 2:
+ return values[0], values[1]
+ }
+ return
+}
+
// CopyFileRange copies at most remain bytes of data from src to dst, using
// the copy_file_range system call. dst and src must refer to regular files.
func CopyFileRange(dst, src *FD, remain int64) (written int64, handled bool, err error) {
- if atomic.LoadInt32(&copyFileRangeSupported) == 0 {
+ if supported := atomic.LoadInt32(&copyFileRangeSupported); supported == 0 {
return 0, false, nil
+ } else if supported == -1 {
+ major, minor := kernelVersion()
+ if major > 5 || (major == 5 && minor >= 3) {
+ atomic.StoreInt32(&copyFileRangeSupported, 1)
+ } else {
+ // copy_file_range(2) is broken in various ways on kernels older than 5.3,
+ // see issue #42400 and
+ // https://man7.org/linux/man-pages/man2/copy_file_range.2.html#VERSIONS
+ atomic.StoreInt32(&copyFileRangeSupported, 0)
+ return 0, false, nil
+ }
}
for remain > 0 {
max := remain
@@ -41,7 +87,7 @@ func CopyFileRange(dst, src *FD, remain int64) (written int64, handled bool, err
// use copy_file_range(2) again.
atomic.StoreInt32(&copyFileRangeSupported, 0)
return 0, false, nil
- case syscall.EXDEV, syscall.EINVAL, syscall.EOPNOTSUPP, syscall.EPERM:
+ case syscall.EXDEV, syscall.EINVAL, syscall.EIO, syscall.EOPNOTSUPP, syscall.EPERM:
// Prior to Linux 5.3, it was not possible to
// copy_file_range across file systems. Similarly to
// the ENOSYS case above, if we see EXDEV, we have
@@ -53,6 +99,9 @@ func CopyFileRange(dst, src *FD, remain int64) (written int64, handled bool, err
// file. This is another case where no data has been
// transferred, so we consider it unhandled.
//
+ // If src and dst are on CIFS, we can see EIO.
+ // See issue #42334.
+ //
// If the file is on NFS, we can see EOPNOTSUPP.
// See issue #40731.
//
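
Note on the copy_file_range_linux.go hunks above: the new kernelVersion helper parses the leading major.minor out of the uname release string, and CopyFileRange now probes it lazily on first use, trusting copy_file_range(2) only on kernels 5.3 and newer and otherwise falling back. A self-contained sketch of the same digit-by-digit parse and the 5.3 gate, working on a plain string rather than syscall.Utsname; the function name and sample release values are illustrative, not from the patch:

package main

import "fmt"

// parseKernelVersion extracts the leading "major.minor" from a kernel
// release string such as "5.4.0-42-generic", mirroring the parse that
// kernelVersion applies to syscall.Utsname.Release.
func parseKernelVersion(release string) (major, minor int) {
	var values [2]int
	vi := 0
	value := 0
	for _, c := range release {
		if '0' <= c && c <= '9' {
			value = value*10 + int(c-'0')
		} else {
			// Assumes an N.N.N-style prefix; anything else may mis-parse.
			values[vi] = value
			vi++
			if vi >= len(values) {
				break
			}
			value = 0
		}
	}
	switch vi {
	case 0:
		return 0, 0
	case 1:
		return values[0], 0
	}
	return values[0], values[1]
}

func main() {
	for _, rel := range []string{"5.4.0-42-generic", "4.19.0-13-amd64"} {
		major, minor := parseKernelVersion(rel)
		ok := major > 5 || (major == 5 && minor >= 3) // the 5.3 gate from the patch
		fmt.Println(rel, "=>", major, minor, "copy_file_range trusted:", ok)
	}
}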
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index 7c10213b868..64a7c088585 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -612,3 +612,16 @@ func TestEINTR(t *testing.T) {
t.Fatalf("want %s, got %s\n", want, output)
}
}
+
+// Issue #42207.
+func TestNeedmDeadlock(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("no signals on %s", runtime.GOOS)
+ }
+ output := runTestProg(t, "testprogcgo", "NeedmDeadlock")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
diff --git a/libgo/go/runtime/os_js.go b/libgo/go/runtime/os_js.go
index ff0ee3aa6be..94983b358d4 100644
--- a/libgo/go/runtime/os_js.go
+++ b/libgo/go/runtime/os_js.go
@@ -59,7 +59,7 @@ func mpreinit(mp *m) {
}
//go:nosplit
-func msigsave(mp *m) {
+func sigsave(p *sigset) {
}
//go:nosplit
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 6c720503c44..e0b4b50456e 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -571,7 +571,7 @@ func schedinit() {
cpuinit() // must run before alginit
alginit() // maps must not be used before this call
- msigsave(_g_.m)
+ sigsave(&_g_.m.sigmask)
initSigmask = _g_.m.sigmask
goargs()
@@ -1496,6 +1496,18 @@ func needm(x byte) {
exit(1)
}
+ // Save and block signals before getting an M.
+ // The signal handler may call needm itself,
+ // and we must avoid a deadlock. Also, once g is installed,
+ // any incoming signals will try to execute,
+ // but we won't have the sigaltstack settings and other data
+ // set up appropriately until the end of minit, which will
+ // unblock the signals. This is the same dance as when
+ // starting a new m to run Go code via newosproc.
+ var sigmask sigset
+ sigsave(&sigmask)
+ sigblock()
+
// Lock extra list, take head, unlock popped list.
// nilokay=false is safe here because of the invariant above,
// that the extra list always contains or will soon contain
@@ -1513,14 +1525,8 @@ func needm(x byte) {
extraMCount--
unlockextra(mp.schedlink.ptr())
- // Save and block signals before installing g.
- // Once g is installed, any incoming signals will try to execute,
- // but we won't have the sigaltstack settings and other data
- // set up appropriately until the end of minit, which will
- // unblock the signals. This is the same dance as when
- // starting a new m to run Go code via newosproc.
- msigsave(mp)
- sigblock()
+ // Store the original signal mask for use by minit.
+ mp.sigmask = sigmask
// Install g (= m->curg).
setg(mp.curg)
@@ -3300,7 +3306,7 @@ func beforefork() {
// a signal handler before exec if a signal is sent to the process
// group. See issue #18600.
gp.m.locks++
- msigsave(gp.m)
+ sigsave(&gp.m.sigmask)
sigblock()
}
diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go
index 6b69dcf06d2..ec7c6471b5d 100644
--- a/libgo/go/runtime/signal_unix.go
+++ b/libgo/go/runtime/signal_unix.go
@@ -956,15 +956,15 @@ func sigfwdgo(sig uint32, info *_siginfo_t, ctx unsafe.Pointer) bool {
return true
}
-// msigsave saves the current thread's signal mask into mp.sigmask.
+// sigsave saves the current thread's signal mask into *p.
// This is used to preserve the non-Go signal mask when a non-Go
// thread calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//go:nosplit
//go:nowritebarrierrec
-func msigsave(mp *m) {
- sigprocmask(_SIG_SETMASK, nil, &mp.sigmask)
+func sigsave(p *sigset) {
+ sigprocmask(_SIG_SETMASK, nil, p)
}
// msigrestore sets the current thread's signal mask to sigmask.
@@ -1038,7 +1038,7 @@ func minitSignalStack() {
// thread's signal mask. When this is called all signals have been
// blocked for the thread. This starts with m.sigmask, which was set
// either from initSigmask for a newly created thread or by calling
-// msigsave if this is a non-Go thread calling a Go function. It
+// sigsave if this is a non-Go thread calling a Go function. It
// removes all essential signals from the mask, thus causing those
// signals to not be blocked. Then it sets the thread's signal mask.
// After this is called the thread can receive signals.
diff --git a/libgo/go/runtime/testdata/testprogcgo/needmdeadlock.go b/libgo/go/runtime/testdata/testprogcgo/needmdeadlock.go
new file mode 100644
index 00000000000..5a9c359006d
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/needmdeadlock.go
@@ -0,0 +1,95 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package main
+
+// This is for issue #42207.
+// During a call to needm we could get a SIGCHLD signal
+// which would itself call needm, causing a deadlock.
+
+/*
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <unistd.h>
+
+extern void GoNeedM();
+
+#define SIGNALERS 10
+
+static void* needmSignalThread(void* p) {
+ pthread_t* pt = (pthread_t*)(p);
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ if (pthread_kill(*pt, SIGCHLD) < 0) {
+ return NULL;
+ }
+ usleep(1);
+ }
+ return NULL;
+}
+
+// We don't need many calls, as the deadlock is only likely
+// to occur the first couple of times that needm is called.
+// After that there will likely be an extra M available.
+#define CALLS 10
+
+static void* needmCallbackThread(void* p) {
+ int i;
+
+ for (i = 0; i < SIGNALERS; i++) {
+ sched_yield(); // Help the signal threads get started.
+ }
+ for (i = 0; i < CALLS; i++) {
+ GoNeedM();
+ }
+ return NULL;
+}
+
+static void runNeedmSignalThread() {
+ int i;
+ pthread_t caller;
+ pthread_t s[SIGNALERS];
+
+ pthread_create(&caller, NULL, needmCallbackThread, NULL);
+ for (i = 0; i < SIGNALERS; i++) {
+ pthread_create(&s[i], NULL, needmSignalThread, &caller);
+ }
+ for (i = 0; i < SIGNALERS; i++) {
+ pthread_join(s[i], NULL);
+ }
+ pthread_join(caller, NULL);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+ "time"
+)
+
+func init() {
+ register("NeedmDeadlock", NeedmDeadlock)
+}
+
+//export GoNeedM
+func GoNeedM() {
+}
+
+func NeedmDeadlock() {
+ // The failure symptom is that the program hangs because of a
+ // deadlock in needm, so set an alarm.
+ go func() {
+ time.Sleep(5 * time.Second)
+ fmt.Println("Hung for 5 seconds")
+ os.Exit(1)
+ }()
+
+ C.runNeedmSignalThread()
+ fmt.Println("OK")
+}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 274ce01c0bf..c037df645b9 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -222,6 +222,9 @@ runtime_m(void)
}
// Set g.
+
+void runtime_setg(G*) __attribute__ ((no_split_stack));
+
void
runtime_setg(G* gp)
{