From a74951c5af5498db5d4be0c14dcaa45fb452e23a Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 4 Sep 2024 16:46:33 +0000 Subject: [PATCH 01/20] [release-branch.go1.23] unique: don't retain uncloned input as key Currently the unique package tries to clone strings that get stored in its internal map to avoid retaining large strings. However, this falls over entirely due to the fact that the original string is *still* stored in the map as a key. Whoops. Fix this by storing the cloned value in the map instead. This change also adds a test which fails without this change. For #69370. Fixes #69383. Change-Id: I1a6bb68ed79b869ea12ab6be061a5ae4b4377ddb Reviewed-on: https://go-review.googlesource.com/c/go/+/610738 Reviewed-by: Michael Pratt Auto-Submit: Michael Knyszek LUCI-TryBot-Result: Go LUCI (cherry picked from commit 21ac23a96f204dfb558a8d3071380c1d105a93ba) Reviewed-on: https://go-review.googlesource.com/c/go/+/612295 Auto-Submit: Tim King --- src/unique/handle.go | 7 ++++--- src/unique/handle_test.go | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/src/unique/handle.go b/src/unique/handle.go index 96d8fedb0cabe6..abc620f60fe14e 100644 --- a/src/unique/handle.go +++ b/src/unique/handle.go @@ -50,13 +50,13 @@ func Make[T comparable](value T) Handle[T] { toInsert *T // Keep this around to keep it alive. toInsertWeak weak.Pointer[T] ) - newValue := func() weak.Pointer[T] { + newValue := func() (T, weak.Pointer[T]) { if toInsert == nil { toInsert = new(T) *toInsert = clone(value, &m.cloneSeq) toInsertWeak = weak.Make(toInsert) } - return toInsertWeak + return *toInsert, toInsertWeak } var ptr *T for { @@ -64,7 +64,8 @@ func Make[T comparable](value T) Handle[T] { wp, ok := m.Load(value) if !ok { // Try to insert a new value into the map. 
- wp, _ = m.LoadOrStore(value, newValue()) + k, v := newValue() + wp, _ = m.LoadOrStore(k, v) } // Now that we're sure there's a value in the map, let's // try to get the pointer we need out of it. diff --git a/src/unique/handle_test.go b/src/unique/handle_test.go index b031bbf6852c6b..dd4b01ef79900b 100644 --- a/src/unique/handle_test.go +++ b/src/unique/handle_test.go @@ -9,7 +9,10 @@ import ( "internal/abi" "reflect" "runtime" + "strings" "testing" + "time" + "unsafe" ) // Set up special types. Because the internal maps are sharded by type, @@ -110,3 +113,22 @@ func checkMapsFor[T comparable](t *testing.T, value T) { } t.Errorf("failed to drain internal maps of %v", value) } + +func TestMakeClonesStrings(t *testing.T) { + s := strings.Clone("abcdefghijklmnopqrstuvwxyz") // N.B. Must be big enough to not be tiny-allocated. + ran := make(chan bool) + runtime.SetFinalizer(unsafe.StringData(s), func(_ *byte) { + ran <- true + }) + h := Make(s) + + // Clean up s (hopefully) and run the finalizer. 
+ runtime.GC() + + select { + case <-time.After(1 * time.Second): + t.Fatal("string was improperly retained") + case <-ran: + } + runtime.KeepAlive(h) +} From c8c6f9abfbbd2f949285a527036a3d0dbd991e74 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 6 Sep 2024 12:19:01 -0700 Subject: [PATCH 02/20] [release-branch.go1.23] syscall: on exec failure, close pidfd For #69284 Fixes #69402 Change-Id: I6350209302778ba5e44fa03d0b9e680d2b4ec192 Reviewed-on: https://go-review.googlesource.com/c/go/+/611495 LUCI-TryBot-Result: Go LUCI Reviewed-by: roger peppe Reviewed-by: Tim King Auto-Submit: Ian Lance Taylor Reviewed-by: Dmitri Shuralyov (cherry picked from commit 8926ca9c5ec3ea0b51e413e87f737aeb1422ea48) Reviewed-on: https://go-review.googlesource.com/c/go/+/613616 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor Reviewed-by: Tobias Klauser --- src/os/pidfd_linux_test.go | 60 +++++++++++++++++++++++++++++++++++++ src/syscall/exec_bsd.go | 5 ++++ src/syscall/exec_freebsd.go | 5 ++++ src/syscall/exec_libc.go | 5 ++++ src/syscall/exec_libc2.go | 5 ++++ src/syscall/exec_linux.go | 8 +++++ src/syscall/exec_unix.go | 4 +++ 7 files changed, 92 insertions(+) diff --git a/src/os/pidfd_linux_test.go b/src/os/pidfd_linux_test.go index fa0877037baadc..c1f41d02d66c73 100644 --- a/src/os/pidfd_linux_test.go +++ b/src/os/pidfd_linux_test.go @@ -9,6 +9,7 @@ import ( "internal/syscall/unix" "internal/testenv" "os" + "os/exec" "syscall" "testing" ) @@ -89,3 +90,62 @@ func TestStartProcessWithPidfd(t *testing.T) { t.Errorf("SendSignal: got %v, want %v", err, syscall.ESRCH) } } + +// Issue #69284 +func TestPidfdLeak(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + // Find the next 10 descriptors. + // We need to get more than one descriptor in practice; + // the pidfd winds up not being the next descriptor. 
+ const count = 10 + want := make([]int, count) + for i := range count { + var err error + want[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors. + for _, d := range want { + syscall.Close(d) + } + + // Start a process 10 times. + for range 10 { + // For testing purposes this has to be an absolute path. + // Otherwise we will fail finding the executable + // and won't start a process at all. + cmd := exec.Command("/noSuchExecutable") + cmd.Run() + } + + // Open the next 10 descriptors again. + got := make([]int, count) + for i := range count { + var err error + got[i], err = syscall.Open(exe, syscall.O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + } + + // Close the descriptors + for _, d := range got { + syscall.Close(d) + } + + t.Logf("got %v", got) + t.Logf("want %v", want) + + // Allow some slack for runtime epoll descriptors and the like. + if got[count-1] > want[count-1]+5 { + t.Errorf("got descriptor %d, want %d", got[count-1], want[count-1]) + } +} diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 149cc2f11c128c..bbdab46de48c03 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -293,3 +293,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_freebsd.go b/src/syscall/exec_freebsd.go index 3226cb88cd999a..686fd23bef435d 100644 --- a/src/syscall/exec_freebsd.go +++ b/src/syscall/exec_freebsd.go @@ -317,3 +317,8 @@ childerror: RawSyscall(SYS_EXIT, 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. 
+} diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index 768e8c131c1323..0e886508737d1e 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -314,6 +314,11 @@ childerror: } } +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} + func ioctlPtr(fd, req uintptr, arg unsafe.Pointer) (err Errno) { return ioctl(fd, req, uintptr(arg)) } diff --git a/src/syscall/exec_libc2.go b/src/syscall/exec_libc2.go index 7a6750084486cf..a0579627a300bf 100644 --- a/src/syscall/exec_libc2.go +++ b/src/syscall/exec_libc2.go @@ -289,3 +289,8 @@ childerror: rawSyscall(abi.FuncPCABI0(libc_exit_trampoline), 253, 0, 0) } } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + // Nothing to do. +} diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e4b9ce1bf47da3..26844121910e8f 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -735,3 +735,11 @@ func writeUidGidMappings(pid int, sys *SysProcAttr) error { return nil } + +// forkAndExecFailureCleanup cleans up after an exec failure. +func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { + if sys.PidFD != nil && *sys.PidFD != -1 { + Close(*sys.PidFD) + *sys.PidFD = -1 + } +} diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go index 1b90aa7e72e0ed..4747fa075834af 100644 --- a/src/syscall/exec_unix.go +++ b/src/syscall/exec_unix.go @@ -237,6 +237,10 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) for err1 == EINTR { _, err1 = Wait4(pid, &wstatus, 0, nil) } + + // OS-specific cleanup on failure. 
+ forkAndExecFailureCleanup(attr, sys) + return 0, err } From fbddfae62f19b5f04555aa593970ac4c6f5a38e5 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Wed, 18 Sep 2024 22:39:05 +0700 Subject: [PATCH 03/20] [release-branch.go1.23] cmd/compile: fix wrong esacpe analysis for rangefunc CL 584596 "-range" suffix to the name of closure generated for a rangefunc loop body. However, this breaks the condition that escape analysis uses for checking whether a closure contains within function, which is "F.funcN" for outer function "F" and closure "funcN". Fixing this by adding new "-rangeN" to the condition. Updates #69434 Fixes #69511 Change-Id: I411de8f63b69a6514a9e9504d49d62e00ce4115d Reviewed-on: https://go-review.googlesource.com/c/go/+/614096 Reviewed-by: David Chase Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI Auto-Submit: Cuong Manh Le Reviewed-on: https://go-review.googlesource.com/c/go/+/614195 --- src/cmd/compile/internal/escape/solve.go | 4 +- test/fixedbugs/issue69434.go | 173 +++++++++++++++++++++++ test/fixedbugs/issue69507.go | 133 +++++++++++++++++ 3 files changed, 308 insertions(+), 2 deletions(-) create mode 100644 test/fixedbugs/issue69434.go create mode 100644 test/fixedbugs/issue69507.go diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go index 2675a16a241fe3..ef17bc48ef2342 100644 --- a/src/cmd/compile/internal/escape/solve.go +++ b/src/cmd/compile/internal/escape/solve.go @@ -318,9 +318,9 @@ func containsClosure(f, c *ir.Func) bool { return false } - // Closures within function Foo are named like "Foo.funcN..." + // Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN". // TODO(mdempsky): Better way to recognize this. fn := f.Sym().Name cn := c.Sym().Name - return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.' + return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' 
|| cn[len(fn)] == '-') } diff --git a/test/fixedbugs/issue69434.go b/test/fixedbugs/issue69434.go new file mode 100644 index 00000000000000..682046601960da --- /dev/null +++ b/test/fixedbugs/issue69434.go @@ -0,0 +1,173 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "fmt" + "io" + "iter" + "math/rand" + "os" + "strings" + "unicode" +) + +// WordReader is the struct that implements io.Reader +type WordReader struct { + scanner *bufio.Scanner +} + +// NewWordReader creates a new WordReader from an io.Reader +func NewWordReader(r io.Reader) *WordReader { + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanWords) + return &WordReader{ + scanner: scanner, + } +} + +// Read reads data from the input stream and returns a single lowercase word at a time +func (wr *WordReader) Read(p []byte) (n int, err error) { + if !wr.scanner.Scan() { + if err := wr.scanner.Err(); err != nil { + return 0, err + } + return 0, io.EOF + } + word := wr.scanner.Text() + cleanedWord := removeNonAlphabetic(word) + if len(cleanedWord) == 0 { + return wr.Read(p) + } + n = copy(p, []byte(cleanedWord)) + return n, nil +} + +// All returns an iterator allowing the caller to iterate over the WordReader using for/range. 
+func (wr *WordReader) All() iter.Seq[string] { + word := make([]byte, 1024) + return func(yield func(string) bool) { + var err error + var n int + for n, err = wr.Read(word); err == nil; n, err = wr.Read(word) { + if !yield(string(word[:n])) { + return + } + } + if err != io.EOF { + fmt.Fprintf(os.Stderr, "error reading word: %v\n", err) + } + } +} + +// removeNonAlphabetic removes non-alphabetic characters from a word using strings.Map +func removeNonAlphabetic(word string) string { + return strings.Map(func(r rune) rune { + if unicode.IsLetter(r) { + return unicode.ToLower(r) + } + return -1 + }, word) +} + +// ProbabilisticSkipper determines if an item should be retained with probability 1/(1<>= 1 + pr.counter-- + if pr.counter == 0 { + pr.refreshCounter() + } + return remove +} + +// EstimateUniqueWordsIter estimates the number of unique words using a probabilistic counting method +func EstimateUniqueWordsIter(reader io.Reader, memorySize int) int { + wordReader := NewWordReader(reader) + words := make(map[string]struct{}, memorySize) + + rounds := 0 + roundRemover := NewProbabilisticSkipper(1) + wordSkipper := NewProbabilisticSkipper(rounds) + wordSkipper.check(rounds) + + for word := range wordReader.All() { + wordSkipper.check(rounds) + if wordSkipper.ShouldSkip() { + delete(words, word) + } else { + words[word] = struct{}{} + + if len(words) >= memorySize { + rounds++ + + wordSkipper = NewProbabilisticSkipper(rounds) + for w := range words { + if roundRemover.ShouldSkip() { + delete(words, w) + } + } + } + } + wordSkipper.check(rounds) + } + + if len(words) == 0 { + return 0 + } + + invProbability := 1 << rounds + estimatedUniqueWords := len(words) * invProbability + return estimatedUniqueWords +} + +func main() { + input := "Hello, world! This is a test. Hello, world, hello!" 
+ expectedUniqueWords := 6 // "hello", "world", "this", "is", "a", "test" (but "hello" and "world" are repeated) + memorySize := 6 + + reader := strings.NewReader(input) + estimatedUniqueWords := EstimateUniqueWordsIter(reader, memorySize) + if estimatedUniqueWords != expectedUniqueWords { + // ... + } +} diff --git a/test/fixedbugs/issue69507.go b/test/fixedbugs/issue69507.go new file mode 100644 index 00000000000000..fc300c848ee62f --- /dev/null +++ b/test/fixedbugs/issue69507.go @@ -0,0 +1,133 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { + err := run() + if err != nil { + panic(err) + } +} + +func run() error { + methods := "AB" + + type node struct { + tag string + choices []string + } + all := []node{ + {"000", permutations(methods)}, + } + + next := 1 + for len(all) > 0 { + cur := all[0] + k := copy(all, all[1:]) + all = all[:k] + + if len(cur.choices) == 1 { + continue + } + + var bestM map[byte][]string + bMax := len(cur.choices) + 1 + bMin := -1 + for sel := range selections(methods) { + m := make(map[byte][]string) + for _, order := range cur.choices { + x := findFirstMatch(order, sel) + m[x] = append(m[x], order) + } + + min := len(cur.choices) + 1 + max := -1 + for _, v := range m { + if len(v) < min { + min = len(v) + } + if len(v) > max { + max = len(v) + } + } + if max < bMax || (max == bMax && min > bMin) { + bestM = m + bMin = min + bMax = max + } + } + + if bMax == len(cur.choices) { + continue + } + + cc := Keys(bestM) + for c := range cc { + choices := bestM[c] + next++ + + switch c { + case 'A': + case 'B': + default: + panic("unexpected selector type " + string(c)) + } + all = append(all, node{"", choices}) + } + } + return nil +} + +func permutations(s string) []string { + if len(s) <= 1 { + return []string{s} + } + + var result []string + for i, char := range s { + rest := s[:i] + 
s[i+1:] + for _, perm := range permutations(rest) { + result = append(result, string(char)+perm) + } + } + return result +} + +type Seq[V any] func(yield func(V) bool) + +func selections(s string) Seq[string] { + return func(yield func(string) bool) { + for bits := 1; bits < 1< Date: Fri, 6 Sep 2024 17:19:34 -0700 Subject: [PATCH 04/20] [release-branch.go1.23] runtime: if stop/reset races with running timer, return correct result The timer code is careful to ensure that if stop/reset is called while a timer is being run, we cancel the run. However, the code failed to ensure that in that case stop/reset returned true, meaning that the timer had been stopped. In the racing case stop/reset could see that t.when had been set to zero, and return false, even though the timer had not and never would fire. Fix this by tracking whether a timer run is in progress, and using that to reliably detect that the run was cancelled, meaning that stop/reset should return true. For #69312 Fixes #69333 Change-Id: I78e870063eb96650638f12c056e32c931417c84a Reviewed-on: https://go-review.googlesource.com/c/go/+/611496 Reviewed-by: David Chase Reviewed-by: Cuong Manh Le Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI Auto-Submit: Ian Lance Taylor (cherry picked from commit 2ebaff4890596ed6064e2dcbbe5e68bc93bed882) Reviewed-on: https://go-review.googlesource.com/c/go/+/616096 Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- src/runtime/time.go | 82 +++++++++++++++++++++++++++++++++++++++--- src/time/sleep_test.go | 62 ++++++++++++++++++++++++++++++++ 2 files changed, 139 insertions(+), 5 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index fc664f49eb8d7c..b43cf9589bf90b 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -26,10 +26,40 @@ type timer struct { // mu protects reads and writes to all fields, with exceptions noted below. 
mu mutex - astate atomic.Uint8 // atomic copy of state bits at last unlock - state uint8 // state bits - isChan bool // timer has a channel; immutable; can be read without lock - blocked uint32 // number of goroutines blocked on timer's channel + astate atomic.Uint8 // atomic copy of state bits at last unlock + state uint8 // state bits + isChan bool // timer has a channel; immutable; can be read without lock + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // The lowest zero bit is set when about to send a value on the channel, + // and cleared after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // An isSending bit is set only when t.mu is held. + // An isSending bit is cleared only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + // + // Setting and clearing Uint8 bits handles the case of + // a timer that is reset concurrently with unlockAndRun. + // If the reset timer runs immediately, we can wind up with + // concurrent calls to unlockAndRun for the same timer. + // Using matched bit set and clear in unlockAndRun + // ensures that the value doesn't get temporarily out of sync. + // + // We use a uint8 to keep the timer struct small. + // This means that we can only support up to 8 concurrent + // runs of a timer, where a concurrent run can only occur if + // we start a run, unlock the timer, the timer is reset to a new + // value (or the ticker fires again), it is ready to run, + // and it is actually run, all before the first run completes. + // Since completing a run is fast, even 2 concurrent timer runs are + // nearly impossible, so this should be safe in practice. + isSending atomic.Uint8 + + blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... 
(period > 0 only) // each time calling f(arg, seq, delay) in the timer goroutine, so f must be @@ -431,6 +461,15 @@ func (t *timer) stop() bool { // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -525,6 +564,15 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // Stop any future sends with stale values. // See timer.unlockAndRun. t.seq++ + + // If there is currently a send in progress, + // incrementing seq is going to prevent that + // send from actually happening. That means + // that we should return true: the timer was + // stopped, even though t.when may be zero. + if t.isSending.Load() > 0 { + pending = true + } } t.unlock() if !async && t.isChan { @@ -1013,6 +1061,24 @@ func (t *timer) unlockAndRun(now int64) { } t.updateHeap() } + + async := debug.asynctimerchan.Load() != 0 + var isSendingClear uint8 + if !async && t.isChan { + // Tell Stop/Reset that we are sending a value. + // Set the lowest zero bit. + // We do this awkward step because atomic.Uint8 + // doesn't support Add or CompareAndSwap. + // We only set bits with t locked. 
+ v := t.isSending.Load() + i := sys.TrailingZeros8(^v) + if i == 8 { + throw("too many concurrent timer firings") + } + isSendingClear = 1 << i + t.isSending.Or(isSendingClear) + } + t.unlock() if raceenabled { @@ -1028,7 +1094,6 @@ func (t *timer) unlockAndRun(now int64) { ts.unlock() } - async := debug.asynctimerchan.Load() != 0 if !async && t.isChan { // For a timer channel, we want to make sure that no stale sends // happen after a t.stop or t.modify, but we cannot hold t.mu @@ -1044,6 +1109,10 @@ func (t *timer) unlockAndRun(now int64) { // and double-check that t.seq is still the seq value we saw above. // If not, the timer has been updated and we should skip the send. // We skip the send by reassigning f to a no-op function. + // + // The isSending field tells t.stop or t.modify that we have + // started to send the value. That lets them correctly return + // true meaning that no value was sent. lock(&t.sendLock) if t.seq != seq { f = func(any, uintptr, int64) {} @@ -1053,6 +1122,9 @@ func (t *timer) unlockAndRun(now int64) { f(arg, seq, delay) if !async && t.isChan { + // We are no longer sending a value. + t.isSending.And(^isSendingClear) + unlock(&t.sendLock) } diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 29f56ef7520baa..5357ed23c8e352 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -785,6 +785,68 @@ func TestAdjustTimers(t *testing.T) { } } +func TestStopResult(t *testing.T) { + testStopResetResult(t, true) +} + +func TestResetResult(t *testing.T) { + testStopResetResult(t, false) +} + +// Test that when racing between running a timer and stopping a timer Stop +// consistently indicates whether a value can be read from the channel. +// Issue #69312. 
+func testStopResetResult(t *testing.T, testStop bool) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + testStopResetResultGODEBUG(t, testStop, name) + }) + } +} + +func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { + t.Setenv("GODEBUG", "asynctimerchan="+godebug) + + stopOrReset := func(timer *Timer) bool { + if testStop { + return timer.Stop() + } else { + return timer.Reset(1 * Hour) + } + } + + start := make(chan struct{}) + var wg sync.WaitGroup + const N = 1000 + wg.Add(N) + for range N { + go func() { + defer wg.Done() + <-start + for j := 0; j < 100; j++ { + timer1 := NewTimer(1 * Millisecond) + timer2 := NewTimer(1 * Millisecond) + select { + case <-timer1.C: + if !stopOrReset(timer2) { + // The test fails if this + // channel read times out. + <-timer2.C + } + case <-timer2.C: + if !stopOrReset(timer1) { + // The test fails if this + // channel read times out. + <-timer1.C + } + } + } + }() + } + close(start) + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. 
// https://golang.org/issue/38860 From ed07b321aef7632f956ce991dd10fdd7e1abd827 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Tue, 1 Oct 2024 16:56:29 +0000 Subject: [PATCH 05/20] [release-branch.go1.23] go1.23.2 Change-Id: I904d2e951796dd4142d6e9de4a55af07852bca51 Reviewed-on: https://go-review.googlesource.com/c/go/+/617019 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek Auto-Submit: Gopher Robot Reviewed-by: David Chase --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index c0bacc9e28f531..7c5b4094049322 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.1 -time 2024-08-29T20:56:24Z +go1.23.2 +time 2024-09-28T01:34:15Z From f8080edefd60c8740915f922cf8a4352e6658174 Mon Sep 17 00:00:00 2001 From: Shulhan Date: Sat, 13 Jul 2024 12:18:04 +0700 Subject: [PATCH 06/20] [release-branch.go1.23] runtime: fix TestGdbAutotmpTypes on gdb version 15 On Arch Linux with gdb version 15.1, the test for TestGdbAutotmpTypes print the following output, ---- ~/src/go/src/runtime $ go test -run=TestGdbAutotmpTypes -v === RUN TestGdbAutotmpTypes === PAUSE TestGdbAutotmpTypes === CONT TestGdbAutotmpTypes runtime-gdb_test.go:78: gdb version 15.1 runtime-gdb_test.go:570: gdb output: Loading Go Runtime support. Target 'exec' cannot support this command. Breakpoint 1 at 0x46e416: file /tmp/TestGdbAutotmpTypes750485513/001/main.go, line 8. This GDB supports auto-downloading debuginfo from the following URLs: Enable debuginfod for this session? (y or [n]) [answered N; input not from terminal] Debuginfod has been disabled. To make this setting permanent, add 'set debuginfod enabled off' to .gdbinit. 
[New LWP 355373] [New LWP 355374] [New LWP 355375] [New LWP 355376] Thread 1 "a.exe" hit Breakpoint 1, main.main () at /tmp/TestGdbAutotmpTypes750485513/001/main.go:8 8 func main() { 9 var iface interface{} = map[string]astruct{} All types matching regular expression "astruct": File runtime: []main.astruct bucket hash main.astruct typedef hash * map[string]main.astruct; typedef noalg.[8]main.astruct noalg.[8]main.astruct; noalg.map.bucket[string]main.astruct runtime-gdb_test.go:587: could not find []main.astruct; in 'info typrs astruct' output !!! FAIL exit status 1 FAIL runtime 0.273s $ ---- In the back trace for "File runtime", each output lines does not end with ";" anymore, while in test we check the string with it. While at it, print the expected string with "%q" instead of "%s" for better error message. For #67089 Fixes #69746 Change-Id: If6019ee68c0d8e495c920f98568741462c7d0fd0 Reviewed-on: https://go-review.googlesource.com/c/go/+/598135 Reviewed-by: David Chase Reviewed-by: Meng Zhuo LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt (cherry picked from commit ff695ca2e3ea37dcb688d470e86ed64849c61f2e) Reviewed-on: https://go-review.googlesource.com/c/go/+/617455 Reviewed-by: Michael Knyszek Auto-Submit: Michael Knyszek --- src/runtime/runtime-gdb_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 5defe2f615eaa4..14561330bbf281 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) { // Check that the backtrace matches the source code. 
types := []string{ - "[]main.astruct;", - "bucket;", - "hash;", - "main.astruct;", - "hash * map[string]main.astruct;", + "[]main.astruct", + "bucket", + "hash", + "main.astruct", + "hash * map[string]main.astruct", } for _, name := range types { if !strings.Contains(sgot, name) { - t.Fatalf("could not find %s in 'info typrs astruct' output", name) + t.Fatalf("could not find %q in 'info typrs astruct' output", name) } } } From 9563300f6e262589ae25c71d778bfcd646d4a750 Mon Sep 17 00:00:00 2001 From: cions Date: Tue, 24 Sep 2024 01:27:40 +0000 Subject: [PATCH 07/20] [release-branch.go1.23] os: ignore SIGSYS in checkPidfd In Android version 11 and earlier, pidfd-related system calls are not allowed by the seccomp policy, which causes crashes due to SIGSYS signals. For #69065 Fixes #69640 Change-Id: Ib29631639a5cf221ac11b4d82390cb79436b8657 GitHub-Last-Rev: aad6b3b32c81795f86bc4a9e81aad94899daf520 GitHub-Pull-Request: golang/go#69543 Reviewed-on: https://go-review.googlesource.com/c/go/+/614277 Auto-Submit: Ian Lance Taylor LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Ian Lance Taylor (cherry picked from commit a3a05ed04cb53c53bdacded2d16f0f3e5facdbb0) Reviewed-on: https://go-review.googlesource.com/c/go/+/616077 Reviewed-by: Michael Knyszek Reviewed-by: Kirill Kolyshkin Reviewed-by: Dmitri Shuralyov Reviewed-by: Mauri de Souza Meneguzzo Auto-Submit: Michael Knyszek --- src/os/pidfd_linux.go | 16 ++++++++++++++++ src/runtime/os_linux.go | 13 +++++++++++-- src/runtime/os_unix_nonlinux.go | 7 +++++++ src/runtime/signal_unix.go | 17 +++++++++++++++++ 4 files changed, 51 insertions(+), 2 deletions(-) diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 545cfe9613b8b4..01a98ca17c286f 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -14,6 +14,7 @@ package os import ( "errors" "internal/syscall/unix" + "runtime" "sync" "syscall" "unsafe" @@ -147,6 +148,13 @@ var checkPidfdOnce = sync.OnceValue(checkPidfd) // execution 
environment in which the above system calls are restricted by // seccomp or a similar technology. func checkPidfd() error { + // In Android version < 12, pidfd-related system calls are not allowed + // by seccomp and trigger the SIGSYS signal. See issue #69065. + if runtime.GOOS == "android" { + ignoreSIGSYS() + defer restoreSIGSYS() + } + // Get a pidfd of the current process (opening of "/proc/self" won't // work for waitid). fd, err := unix.PidFDOpen(syscall.Getpid(), 0) @@ -174,3 +182,11 @@ func checkPidfd() error { return nil } + +// Provided by runtime. +// +//go:linkname ignoreSIGSYS +func ignoreSIGSYS() + +//go:linkname restoreSIGSYS +func restoreSIGSYS() diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 6ce656c70e146e..e80d390e0d09f2 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -879,8 +879,9 @@ func runPerThreadSyscall() { } const ( - _SI_USER = 0 - _SI_TKILL = -6 + _SI_USER = 0 + _SI_TKILL = -6 + _SYS_SECCOMP = 1 ) // sigFromUser reports whether the signal was sent because of a call @@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool { return code == _SI_USER || code == _SI_TKILL } +// sigFromSeccomp reports whether the signal was sent from seccomp. +// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + code := int32(c.sigcode()) + return code == _SYS_SECCOMP +} + //go:nosplit func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) { r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) diff --git a/src/runtime/os_unix_nonlinux.go b/src/runtime/os_unix_nonlinux.go index b98753b8fe12b7..0e8b61c3b11aa2 100644 --- a/src/runtime/os_unix_nonlinux.go +++ b/src/runtime/os_unix_nonlinux.go @@ -13,3 +13,10 @@ package runtime func (c *sigctxt) sigFromUser() bool { return c.sigcode() == _SI_USER } + +// sigFromSeccomp reports whether the signal was sent from seccomp. 
+// +//go:nosplit +func (c *sigctxt) sigFromSeccomp() bool { + return false +} diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 8ba498bdb238d5..6f40f440e807f8 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -605,6 +605,19 @@ var crashing atomic.Int32 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool var testSigusr1 func(gp *g) bool +// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065. +var sigsysIgnored uint32 + +//go:linkname ignoreSIGSYS os.ignoreSIGSYS +func ignoreSIGSYS() { + atomic.Store(&sigsysIgnored, 1) +} + +//go:linkname restoreSIGSYS os.restoreSIGSYS +func restoreSIGSYS() { + atomic.Store(&sigsysIgnored, 0) +} + // sighandler is invoked when a signal occurs. The global g will be // set to a gsignal goroutine and we will be running on the alternate // signal stack. The parameter gp will be the value of the global g @@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { return } + if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 { + return + } + if flags&_SigKill != 0 { dieFromSignal(sig) } From cc16cdf48f228caebc55c982ed5b1b187ff39fcc Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 2 Oct 2024 13:38:25 -0700 Subject: [PATCH 08/20] [release-branch.go1.23] runtime: clear isSending bit earlier I've done some more testing of the new isSending field. I'm not able to get more than 2 bits set. That said, with this change it's significantly less likely to have even 2 bits set. The idea here is to clear the bit before possibly locking the channel we are sending the value on, thus avoiding some delay and some serialization. 
For #69312 For #69333 Change-Id: I8b5f167f162bbcbcbf7ea47305967f349b62b0f4 Reviewed-on: https://go-review.googlesource.com/c/go/+/617596 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- src/runtime/time.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index b43cf9589bf90b..7abd15ee862608 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -1114,6 +1114,11 @@ func (t *timer) unlockAndRun(now int64) { // started to send the value. That lets them correctly return // true meaning that no value was sent. lock(&t.sendLock) + + // We are committed to possibly sending a value based on seq, + // so no need to keep telling stop/modify that we are sending. + t.isSending.And(^isSendingClear) + if t.seq != seq { f = func(any, uintptr, int64) {} } @@ -1122,9 +1127,6 @@ func (t *timer) unlockAndRun(now int64) { f(arg, seq, delay) if !async && t.isChan { - // We are no longer sending a value. - t.isSending.And(^isSendingClear) - unlock(&t.sendLock) } From 7fc83126731de12449f7b38c32e2e318c439a6d4 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Tue, 11 Jun 2024 16:34:38 -0400 Subject: [PATCH 09/20] [release-branch.go1.23] os: add clone(CLONE_PIDFD) check to pidfd feature check clone(CLONE_PIDFD) was added in Linux 5.2 and pidfd_open was added in Linux 5.3. Thus our feature check for pidfd_open should be sufficient to ensure that clone(CLONE_PIDFD) works. Unfortuantely, some alternative Linux implementations may not follow this strict ordering. For example, QEMU 7.2 (Dec 2022) added pidfd_open, but clone(CLONE_PIDFD) was only added in QEMU 8.0 (Apr 2023). Debian bookworm provides QEMU 7.2 by default. For #68976. Fixes #69259. 
Change-Id: Ie3f3dc51f0cd76944871bf98690abf59f68fd7bf Reviewed-on: https://go-review.googlesource.com/c/go/+/592078 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui (cherry picked from commit 7a5fc9b34deb8d9fe22c9d060a5839827344fcc2) Reviewed-on: https://go-review.googlesource.com/c/go/+/612218 --- src/os/pidfd_linux.go | 24 ++++++++++-- src/syscall/exec_linux.go | 81 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 3 deletions(-) diff --git a/src/os/pidfd_linux.go b/src/os/pidfd_linux.go index 01a98ca17c286f..0bfef7759cc679 100644 --- a/src/os/pidfd_linux.go +++ b/src/os/pidfd_linux.go @@ -8,6 +8,10 @@ // v5.3: pidfd_open syscall, clone3 syscall; // v5.4: P_PIDFD idtype support for waitid syscall; // v5.6: pidfd_getfd syscall. +// +// N.B. Alternative Linux implementations may not follow this ordering. e.g., +// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until +// 8.0. package os @@ -140,9 +144,9 @@ func pidfdWorks() bool { var checkPidfdOnce = sync.OnceValue(checkPidfd) -// checkPidfd checks whether all required pidfd-related syscalls work. -// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid -// syscall with idtype of P_PIDFD. +// checkPidfd checks whether all required pidfd-related syscalls work. This +// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with +// idtype of P_PIDFD, and clone(CLONE_PIDFD). // // Reasons for non-working pidfd syscalls include an older kernel and an // execution environment in which the above system calls are restricted by @@ -180,9 +184,23 @@ func checkPidfd() error { return NewSyscallError("pidfd_send_signal", err) } + // Verify that clone(CLONE_PIDFD) works. + // + // This shouldn't be necessary since pidfd_open was added in Linux 5.3, + // after CLONE_PIDFD in Linux 5.2, but some alternative Linux + // implementations may not adhere to this ordering. 
+ if err := checkClonePidfd(); err != nil { + return err + } + return nil } +// Provided by syscall. +// +//go:linkname checkClonePidfd +func checkClonePidfd() error + // Provided by runtime. // //go:linkname ignoreSIGSYS diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index 26844121910e8f..3e15676fcb3d5f 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -7,6 +7,7 @@ package syscall import ( + errpkg "errors" "internal/itoa" "runtime" "unsafe" @@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att if clone3 != nil { pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0) } else { + // N.B. Keep in sync with doCheckClonePidfd. flags |= uintptr(SIGCHLD) if runtime.GOARCH == "s390x" { // On Linux/s390, the first two arguments of clone(2) are swapped. @@ -743,3 +745,82 @@ func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) { *sys.PidFD = -1 } } + +// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a +// clone. +// +//go:linkname os_checkClonePidfd os.checkClonePidfd +func os_checkClonePidfd() error { + pidfd := int32(-1) + pid, errno := doCheckClonePidfd(&pidfd) + if errno != 0 { + return errno + } + + if pidfd == -1 { + // Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process + // before returning. + + var err error + for { + var status WaitStatus + _, err = Wait4(int(pid), &status, 0, nil) + if err != EINTR { + break + } + } + if err != nil { + return err + } + + return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd") + } + + // Good: CLONE_PIDFD provided a pidfd. Reap the process and close the + // pidfd. 
+ defer Close(int(pidfd)) + + for { + const _P_PIDFD = 3 + _, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED, 0, 0) + if errno != EINTR { + break + } + } + if errno != 0 { + return errno + } + + return nil +} + +// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and +// child execution. This is a separate function so we can separate the child's +// and parent's stack frames if we're using vfork. +// +// This is go:noinline because the point is to keep the stack frames of this +// and os_checkClonePidfd separate. +// +//go:noinline +func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { + flags := uintptr(CLONE_VFORK|CLONE_VM|CLONE_PIDFD|SIGCHLD) + if runtime.GOARCH == "s390x" { + // On Linux/s390, the first two arguments of clone(2) are swapped. + pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd))) + } else { + pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd))) + } + if errno != 0 || pid != 0 { + // If we're in the parent, we must return immediately + // so we're not in the same stack frame as the child. + // This can at most use the return PC, which the child + // will not modify, and the results of + // rawVforkSyscall, which must have been written after + // the child was replaced. + return + } + + for { + RawSyscall(SYS_EXIT, 0, 0, 0) + } +} From 6495ce0495041ba28fdbad8ae8b0e0996481e6f4 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Wed, 2 Oct 2024 17:20:12 -0400 Subject: [PATCH 10/20] [release-branch.go1.23] syscall: use SYS_EXIT_GROUP in CLONE_PIDFD feature check child Inside Google we have seen issues with QEMU user mode failing to wake a parent waitid when this child exits with SYS_EXIT. This bug appears to not affect SYS_EXIT_GROUP. 
It is currently unclear if this is a general QEMU or specific to Google's configuration, but SYS_EXIT and SYS_EXIT_GROUP are semantically equivalent here, so we can use the latter here in case this is a general QEMU bug. For #68976. For #69259. Change-Id: I34e51088c9a6b7493a060e2a719a3cc4a3d54aa0 Reviewed-on: https://go-review.googlesource.com/c/go/+/617417 Reviewed-by: Ian Lance Taylor LUCI-TryBot-Result: Go LUCI (cherry picked from commit 47a99359206f0dd41228deda0aa31f1e769cc156) Reviewed-on: https://go-review.googlesource.com/c/go/+/617716 --- src/syscall/exec_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index 3e15676fcb3d5f..dfd9a8368a9e50 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -821,6 +821,6 @@ func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) { } for { - RawSyscall(SYS_EXIT, 0, 0, 0) + RawSyscall(SYS_EXIT_GROUP, 0, 0, 0) } } From 35c010ad6db5113f51e1867ab3d0108754a3264c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisend=C3=B6rfer?= Date: Fri, 30 Aug 2024 08:17:19 +0200 Subject: [PATCH 11/20] [release-branch.go1.23] runtime: fix GoroutineProfile stacks not getting null terminated Fix a regression introduced in CL 572396 causing goroutine stacks not getting null terminated. This bug impacts callers that reuse the []StackRecord slice for multiple calls to GoroutineProfile. See https://github.com/felixge/fgprof/issues/33 for an example of the problem. Add a test case to prevent similar regressions in the future. Use null padding instead of null termination to be consistent with other profile types and because it's less code to implement. Also fix the ThreadCreateProfile code path. 
Fixes #69258 Change-Id: I0b9414f6c694c304bc03a5682586f619e9bf0588 Reviewed-on: https://go-review.googlesource.com/c/go/+/609815 Reviewed-by: Tim King LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt (cherry picked from commit 49e542aa85b7c2d9f6cf50de00843b455bc1e635) Reviewed-on: https://go-review.googlesource.com/c/go/+/621277 Reviewed-by: Cherry Mui --- src/runtime/mprof.go | 6 ++- src/runtime/pprof/pprof_test.go | 92 +++++++++++++++++++++++++++++---- 2 files changed, 86 insertions(+), 12 deletions(-) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 006274757e66f1..82b7fa68aecbde 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1270,7 +1270,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok // of calling ThreadCreateProfile directly. func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) { - copy(p[0].Stack0[:], r.Stack) + i := copy(p[0].Stack0[:], r.Stack) + clear(p[0].Stack0[i:]) p = p[1:] }) } @@ -1649,7 +1650,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { return } for i, mr := range records[0:n] { - copy(p[i].Stack0[:], mr.Stack) + l := copy(p[i].Stack0[:], mr.Stack) + clear(p[i].Stack0[l:]) } return } diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 30ef50b1c0fa7a..d16acf54dabd98 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -2441,16 +2441,7 @@ func TestTimeVDSO(t *testing.T) { } func TestProfilerStackDepth(t *testing.T) { - // Disable sampling, otherwise it's difficult to assert anything. 
- oldMemRate := runtime.MemProfileRate - runtime.MemProfileRate = 1 - runtime.SetBlockProfileRate(1) - oldMutexRate := runtime.SetMutexProfileFraction(1) - t.Cleanup(func() { - runtime.MemProfileRate = oldMemRate - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(oldMutexRate) - }) + t.Cleanup(disableSampling()) const depth = 128 go produceProfileEvents(t, depth) @@ -2742,3 +2733,84 @@ runtime/pprof.inlineA`, }) } } + +func TestProfileRecordNullPadding(t *testing.T) { + // Produce events for the different profile types. + t.Cleanup(disableSampling()) + memSink = make([]byte, 1) // MemProfile + <-time.After(time.Millisecond) // BlockProfile + blockMutex(t) // MutexProfile + runtime.GC() + + // Test that all profile records are null padded. + testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile) + testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile) + testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, true) + }) + testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) { + return runtime.MemProfile(p, false) + }) + // Not testing ThreadCreateProfile because it is broken, see issue 6104. 
+} + +func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) { + stack0 := func(sr *T) *[32]uintptr { + switch t := any(sr).(type) { + case *runtime.StackRecord: + return &t.Stack0 + case *runtime.MemProfileRecord: + return &t.Stack0 + case *runtime.BlockProfileRecord: + return &t.Stack0 + default: + panic(fmt.Sprintf("unexpected type %T", sr)) + } + } + + t.Run(name, func(t *testing.T) { + var p []T + for { + n, ok := fn(p) + if ok { + p = p[:n] + break + } + p = make([]T, n*2) + for i := range p { + s0 := stack0(&p[i]) + for j := range s0 { + // Poison the Stack0 array to identify lack of zero padding + s0[j] = ^uintptr(0) + } + } + } + + if len(p) == 0 { + t.Fatal("no records found") + } + + for _, sr := range p { + for i, v := range stack0(&sr) { + if v == ^uintptr(0) { + t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr) + } + } + } + }) +} + +// disableSampling configures the profilers to capture all events, otherwise +// it's difficult to assert anything. +func disableSampling() func() { + oldMemRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + runtime.SetBlockProfileRate(1) + oldMutexRate := runtime.SetMutexProfileFraction(1) + return func() { + runtime.MemProfileRate = oldMemRate + runtime.SetBlockProfileRate(0) + runtime.SetMutexProfileFraction(oldMutexRate) + } +} From 8d79bf799b46875b52bef6a47f89b73ead824160 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 14 Oct 2024 11:46:17 -0700 Subject: [PATCH 12/20] [release-branch.go1.23] runtime: don't frob isSending for tickers The Ticker Stop and Reset methods don't report a value, so we don't need to track whether they are interrupting a send. This includes a test that used to fail about 2% of the time on my laptop when run under x/tools/cmd/stress. 
For #69880 Fixes #69882 Change-Id: Ic6d14b344594149dd3c24b37bbe4e42e83f9a9ad Reviewed-on: https://go-review.googlesource.com/c/go/+/620136 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor Auto-Submit: Michael Knyszek Auto-Submit: Ian Lance Taylor Reviewed-by: Michael Knyszek (cherry picked from commit 48849e0866f64a40d04a9151e44e5a73acdfc17b) Reviewed-on: https://go-review.googlesource.com/c/go/+/620137 Reviewed-by: Dmitri Shuralyov --- src/runtime/time.go | 17 +++++++++++------ src/time/sleep_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index 7abd15ee862608..19b4ac99010529 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -33,6 +33,7 @@ type timer struct { // isSending is used to handle races between running a // channel timer and stopping or resetting the timer. // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. // The lowest zero bit is set when about to send a value on the channel, // and cleared after sending the value. // The stop/reset code uses this to detect whether it @@ -467,7 +468,7 @@ func (t *timer) stop() bool { // send from actually happening. That means // that we should return true: the timer was // stopped, even though t.when may be zero. - if t.isSending.Load() > 0 { + if t.period == 0 && t.isSending.Load() > 0 { pending = true } } @@ -529,6 +530,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in t.maybeRunAsync() } t.trace("modify") + oldPeriod := t.period t.period = period if f != nil { t.f = f @@ -570,7 +572,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in // send from actually happening. That means // that we should return true: the timer was // stopped, even though t.when may be zero. 
- if t.isSending.Load() > 0 { + if oldPeriod == 0 && t.isSending.Load() > 0 { pending = true } } @@ -1064,7 +1066,7 @@ func (t *timer) unlockAndRun(now int64) { async := debug.asynctimerchan.Load() != 0 var isSendingClear uint8 - if !async && t.isChan { + if !async && t.isChan && t.period == 0 { // Tell Stop/Reset that we are sending a value. // Set the lowest zero bit. // We do this awkward step because atomic.Uint8 @@ -1115,9 +1117,12 @@ func (t *timer) unlockAndRun(now int64) { // true meaning that no value was sent. lock(&t.sendLock) - // We are committed to possibly sending a value based on seq, - // so no need to keep telling stop/modify that we are sending. - t.isSending.And(^isSendingClear) + if t.period == 0 { + // We are committed to possibly sending a value + // based on seq, so no need to keep telling + // stop/modify that we are sending. + t.isSending.And(^isSendingClear) + } if t.seq != seq { f = func(any, uintptr, int64) {} diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 5357ed23c8e352..520ff957d09fc1 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -847,6 +847,31 @@ func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { wg.Wait() } +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeup(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTicker(Microsecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 100000 { + select { + case <-timer.C: + case <-After(Millisecond): + } + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. 
// https://golang.org/issue/38860 From 58babf6e0bf58cd81bb5a71744a1c195fba2d6c8 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Mon, 21 Oct 2024 17:34:22 +0000 Subject: [PATCH 13/20] [release-branch.go1.23] runtime,time: use atomic.Int32 for isSending This change switches isSending to be an atomic.Int32 instead of an atomic.Uint8. The Int32 version is managed as a counter, which is something that we couldn't do with Uint8 without adding a new intrinsic which may not be available on all architectures. That is, instead of only being able to support 8 concurrent timer firings on the same timer because we only have 8 independent bits to set for each concurrent timer firing, we can now have 2^31-1 concurrent timer firings before running into any issues. Like the fact that each bit-set was matched with a clear, here we match increments with decrements to indicate that we're in the "sending on a channel" critical section in the timer code, so we can report the correct result back on Stop or Reset. We choose an Int32 instead of a Uint32 because it's easier to check for obviously bad values (negative values are always bad) and 2^31-1 concurrent timer firings should be enough for anyone. Previously, we avoided anything bigger than a Uint8 because we could pack it into some padding in the runtime.timer struct. But it turns out that the type that actually matters, runtime.timeTimer, is exactly 96 bytes in size. This means its in the next size class up in the 112 byte size class because of an allocation header. We thus have some free space to work with. This change increases the size of this struct from 96 bytes to 104 bytes. (I'm not sure if runtime.timer is often allocated directly, but if it is, we get lucky in the same way too. It's exactly 80 bytes in size, which means its in the 96-byte size class, leaving us with some space to work with.) Fixes #69978 For #69969. Related to #69880 and #69312 and #69882. 
Change-Id: I9fd59cb6a69365c62971d1f225490a65c58f3e77 Cq-Include-Trybots: luci.golang.try:go1.23-linux-amd64-longtest Reviewed-on: https://go-review.googlesource.com/c/go/+/621616 Reviewed-by: Ian Lance Taylor Auto-Submit: Michael Knyszek LUCI-TryBot-Result: Go LUCI (cherry picked from commit 6a49f81edc7aa8aa12e26a1a0ed8819a3e5c7b5e) Reviewed-on: https://go-review.googlesource.com/c/go/+/621856 Auto-Submit: Ian Lance Taylor Reviewed-by: Michael Pratt --- src/runtime/time.go | 59 +++++++++++++----------------------------- src/time/sleep_test.go | 30 +++++++++++++++++++-- 2 files changed, 46 insertions(+), 43 deletions(-) diff --git a/src/runtime/time.go b/src/runtime/time.go index 19b4ac99010529..7b344a349610d3 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -30,36 +30,6 @@ type timer struct { state uint8 // state bits isChan bool // timer has a channel; immutable; can be read without lock - // isSending is used to handle races between running a - // channel timer and stopping or resetting the timer. - // It is used only for channel timers (t.isChan == true). - // It is not used for tickers. - // The lowest zero bit is set when about to send a value on the channel, - // and cleared after sending the value. - // The stop/reset code uses this to detect whether it - // stopped the channel send. - // - // An isSending bit is set only when t.mu is held. - // An isSending bit is cleared only when t.sendLock is held. - // isSending is read only when both t.mu and t.sendLock are held. - // - // Setting and clearing Uint8 bits handles the case of - // a timer that is reset concurrently with unlockAndRun. - // If the reset timer runs immediately, we can wind up with - // concurrent calls to unlockAndRun for the same timer. - // Using matched bit set and clear in unlockAndRun - // ensures that the value doesn't get temporarily out of sync. - // - // We use a uint8 to keep the timer struct small. 
- // This means that we can only support up to 8 concurrent - // runs of a timer, where a concurrent run can only occur if - // we start a run, unlock the timer, the timer is reset to a new - // value (or the ticker fires again), it is ready to run, - // and it is actually run, all before the first run completes. - // Since completing a run is fast, even 2 concurrent timer runs are - // nearly impossible, so this should be safe in practice. - isSending atomic.Uint8 - blocked uint32 // number of goroutines blocked on timer's channel // Timer wakes up at when, and then at when+period, ... (period > 0 only) @@ -99,6 +69,20 @@ type timer struct { // sendLock protects sends on the timer's channel. // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. sendLock mutex + + // isSending is used to handle races between running a + // channel timer and stopping or resetting the timer. + // It is used only for channel timers (t.isChan == true). + // It is not used for tickers. + // The value is incremented when about to send a value on the channel, + // and decremented after sending the value. + // The stop/reset code uses this to detect whether it + // stopped the channel send. + // + // isSending is incremented only when t.mu is held. + // isSending is decremented only when t.sendLock is held. + // isSending is read only when both t.mu and t.sendLock are held. + isSending atomic.Int32 } // init initializes a newly allocated timer t. @@ -1065,20 +1049,11 @@ func (t *timer) unlockAndRun(now int64) { } async := debug.asynctimerchan.Load() != 0 - var isSendingClear uint8 if !async && t.isChan && t.period == 0 { // Tell Stop/Reset that we are sending a value. - // Set the lowest zero bit. - // We do this awkward step because atomic.Uint8 - // doesn't support Add or CompareAndSwap. - // We only set bits with t locked. 
- v := t.isSending.Load() - i := sys.TrailingZeros8(^v) - if i == 8 { + if t.isSending.Add(1) < 0 { throw("too many concurrent timer firings") } - isSendingClear = 1 << i - t.isSending.Or(isSendingClear) } t.unlock() @@ -1121,7 +1096,9 @@ func (t *timer) unlockAndRun(now int64) { // We are committed to possibly sending a value // based on seq, so no need to keep telling // stop/modify that we are sending. - t.isSending.And(^isSendingClear) + if t.isSending.Add(-1) < 0 { + throw("mismatched isSending updates") + } } if t.seq != seq { diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 520ff957d09fc1..285a2e748c4af7 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -847,9 +847,9 @@ func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) { wg.Wait() } -// Test having a large number of goroutines wake up a timer simultaneously. +// Test having a large number of goroutines wake up a ticker simultaneously. // This used to trigger a crash when run under x/tools/cmd/stress. -func TestMultiWakeup(t *testing.T) { +func TestMultiWakeupTicker(t *testing.T) { if testing.Short() { t.Skip("-short") } @@ -872,6 +872,32 @@ func TestMultiWakeup(t *testing.T) { wg.Wait() } +// Test having a large number of goroutines wake up a timer simultaneously. +// This used to trigger a crash when run under x/tools/cmd/stress. +func TestMultiWakeupTimer(t *testing.T) { + if testing.Short() { + t.Skip("-short") + } + + goroutines := runtime.GOMAXPROCS(0) + timer := NewTimer(Nanosecond) + var wg sync.WaitGroup + wg.Add(goroutines) + for range goroutines { + go func() { + defer wg.Done() + for range 10000 { + select { + case <-timer.C: + default: + } + timer.Reset(Nanosecond) + } + }() + } + wg.Wait() +} + // Benchmark timer latency when the thread that creates the timer is busy with // other work and the timers must be serviced by other threads. 
// https://golang.org/issue/38860 From cfe0ae0b7070048ceda021988b01fbc6a8589a1b Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Wed, 23 Oct 2024 16:28:52 +0000 Subject: [PATCH 14/20] [release-branch.go1.23] runtime: uphold goroutine profile invariants in coroswitch Goroutine profiles require checking in with the profiler before any goroutine starts running. coroswitch is a place where a goroutine may start running, but where we do not check in with the profiler, which leads to crashes. Fix this by checking in with the profiler the same way execute does. For #69998. Fixes #70001. Change-Id: Idef6dd31b70a73dd1c967b56c307c7a46a26ba73 Reviewed-on: https://go-review.googlesource.com/c/go/+/622016 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI (cherry picked from commit 2a98a1849f059ffa94ab23a1ab7d8fa0fd0b48dd) Reviewed-on: https://go-review.googlesource.com/c/go/+/622375 Reviewed-by: Michael Pratt --- src/runtime/coro.go | 12 +++++++++ src/runtime/pprof/pprof_test.go | 45 +++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/src/runtime/coro.go b/src/runtime/coro.go index 30ada455e4985e..d93817f92f1caa 100644 --- a/src/runtime/coro.go +++ b/src/runtime/coro.go @@ -208,6 +208,18 @@ func coroswitch_m(gp *g) { // directly if possible. setGNoWB(&mp.curg, gnext) setMNoWB(&gnext.m, mp) + + // Synchronize with any out-standing goroutine profile. We're about to start + // executing, and an invariant of the profiler is that we tryRecordGoroutineProfile + // whenever a goroutine is about to start running. + // + // N.B. We must do this before transitioning to _Grunning but after installing gnext + // in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile + // may allocate). 
+ if goroutineProfile.active { + tryRecordGoroutineProfile(gnext, nil, osyield) + } + if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) { // The CAS failed: use casgstatus, which will take care of // coordinating with the garbage collector about the state change. diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index d16acf54dabd98..41952ff147c4b7 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -15,6 +15,7 @@ import ( "internal/syscall/unix" "internal/testenv" "io" + "iter" "math" "math/big" "os" @@ -1754,6 +1755,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) { } } +// Regression test for #69998. +func TestGoroutineProfileCoro(t *testing.T) { + testenv.MustHaveParallelism(t) + + goroutineProf := Lookup("goroutine") + + // Set up a goroutine to just create and run coroutine goroutines all day. + iterFunc := func() { + p, stop := iter.Pull2( + func(yield func(int, int) bool) { + for i := 0; i < 10000; i++ { + if !yield(i, i) { + return + } + } + }, + ) + defer stop() + for { + _, _, ok := p() + if !ok { + break + } + } + } + var wg sync.WaitGroup + done := make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + iterFunc() + select { + case <-done: + default: + } + } + }() + + // Take a goroutine profile. If the bug in #69998 is present, this will crash + // with high probability. We don't care about the output for this bug. + goroutineProf.WriteTo(io.Discard, 1) +} + func BenchmarkGoroutine(b *testing.B) { withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) { return func(b *testing.B) { From 5472853843bd9ae72ddc107a556558d13e39b035 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 7 Oct 2024 11:11:06 -0400 Subject: [PATCH 15/20] [release-branch.go1.23] cmd/link: generate Mach-O UUID when -B flag is specified Currently, on Mach-O, the Go linker doesn't generate LC_UUID in internal linking mode. 
This causes some macOS system tools unable to track the binary, as well as in some cases the binary unable to access local network on macOS 15. This CL makes the linker start generate LC_UUID. Currently, the UUID is generated if the -B flag is specified. And we'll make it generate UUID by default in a later CL. The -B flag is currently for generating GNU build ID on ELF, which is a similar concept to Mach-O's UUID. Instead of introducing another flag, we just use the same flag and the same setting. Specifically, "-B gobuildid" will generate a UUID based on the Go build ID. Updates #68678. Fixes #69992. Cq-Include-Trybots: luci.golang.try:go1.23-darwin-amd64_14,go1.23-darwin-arm64_13 Change-Id: I90089a78ba144110bf06c1c6836daf2d737ff10a Reviewed-on: https://go-review.googlesource.com/c/go/+/618595 Reviewed-by: Michael Knyszek Reviewed-by: Ingo Oeser Reviewed-by: Than McIntosh LUCI-TryBot-Result: Go LUCI (cherry picked from commit 20ed60311848ca40e51cb430fa602dd83a9c726f) Reviewed-on: https://go-review.googlesource.com/c/go/+/622595 Reviewed-by: Michael Pratt --- src/cmd/link/internal/ld/elf.go | 14 ++++++++++--- src/cmd/link/internal/ld/macho.go | 16 ++++++++++++++ src/cmd/link/internal/ld/macho_update_uuid.go | 2 +- src/cmd/link/internal/ld/main.go | 6 +++++- test/fixedbugs/issue14636.go | 21 ++++++++++++------- 5 files changed, 47 insertions(+), 12 deletions(-) diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 0d8455d92e336b..bc484dedf6ed54 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int { return int(sh.Size) } -func addbuildinfo(val string) { +func addbuildinfo(ctxt *Link) { + val := *flagHostBuildid if val == "gobuildid" { buildID := *flagBuildid if buildID == "" { Exitf("-B gobuildid requires a Go build ID supplied via -buildid") } + if ctxt.IsDarwin() { + buildinfo = uuidFromGoBuildId(buildID) + return + } + hashedBuildID := 
notsha256.Sum256([]byte(buildID)) buildinfo = hashedBuildID[:20] @@ -821,11 +827,13 @@ func addbuildinfo(val string) { if !strings.HasPrefix(val, "0x") { Exitf("-B argument must start with 0x: %s", val) } - ov := val val = val[2:] - const maxLen = 32 + maxLen := 32 + if ctxt.IsDarwin() { + maxLen = 16 + } if hex.DecodedLen(len(val)) > maxLen { Exitf("-B option too long (max %d digits): %s", maxLen, ov) } diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 34624c25a9f333..c5a85f0e75e7cf 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -297,6 +297,8 @@ func getMachoHdr() *MachoHdr { return &machohdr } +// Create a new Mach-O load command. ndata is the number of 32-bit words for +// the data (not including the load command header). func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad { if arch.PtrSize == 8 && (ndata&1 != 0) { ndata++ @@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) { } } + if ctxt.IsInternal() && len(buildinfo) > 0 { + ml := newMachoLoad(ctxt.Arch, LC_UUID, 4) + // Mach-O UUID is 16 bytes + if len(buildinfo) < 16 { + buildinfo = append(buildinfo, make([]byte, 16)...) + } + // By default, buildinfo is already in UUIDv3 format + // (see uuidFromGoBuildId). 
+ ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo) + ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:]) + ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:]) + ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:]) + } + if ctxt.IsInternal() && ctxt.NeedCodeSign() { ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2) ml.data[0] = uint32(codesigOff) diff --git a/src/cmd/link/internal/ld/macho_update_uuid.go b/src/cmd/link/internal/ld/macho_update_uuid.go index de27e655d59bf4..40e0c11ed19d6e 100644 --- a/src/cmd/link/internal/ld/macho_update_uuid.go +++ b/src/cmd/link/internal/ld/macho_update_uuid.go @@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte { // to use this UUID flavor than any of the others. This is similar // to how other linkers handle this (for example this code in lld: // https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524). - rv[6] &= 0xcf + rv[6] &= 0x0f rv[6] |= 0x30 rv[8] &= 0x3f rv[8] |= 0xc0 diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 56e865d8a53287..12bc896c66c3d7 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -95,6 +95,7 @@ var ( flagN = flag.Bool("n", false, "no-op (deprecated)") FlagS = flag.Bool("s", false, "disable symbol table") flag8 bool // use 64-bit addresses in symbol table + flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID") flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") @@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) { flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, 
"compress DWARF if possible") - objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo) objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) objabi.AddVersionFlag() // -V objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) @@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) { *flagBuildid = "go-openbsd" } + if *flagHostBuildid != "" { + addbuildinfo(ctxt) + } + // enable benchmarking var bench *benchmark.Metrics if len(*benchmarkFlag) != 0 { diff --git a/test/fixedbugs/issue14636.go b/test/fixedbugs/issue14636.go index c8e751fb613c2e..a866c9a9e30e8e 100644 --- a/test/fixedbugs/issue14636.go +++ b/test/fixedbugs/issue14636.go @@ -12,22 +12,29 @@ import ( "bytes" "log" "os/exec" + "runtime" "strings" ) func main() { - checkLinkOutput("", "-B argument must start with 0x") + // The cannot open file error indicates that the parsing of -B flag + // succeeded and it failed at a later step. 
checkLinkOutput("0", "-B argument must start with 0x") - checkLinkOutput("0x", "usage") + checkLinkOutput("0x", "cannot open file nonexistent.o") checkLinkOutput("0x0", "-B argument must have even number of digits") - checkLinkOutput("0x00", "usage") + checkLinkOutput("0x00", "cannot open file nonexistent.o") checkLinkOutput("0xYZ", "-B argument contains invalid hex digit") - checkLinkOutput("0x"+strings.Repeat("00", 32), "usage") - checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)") + + maxLen := 32 + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + maxLen = 16 + } + checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o") + checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long") } func checkLinkOutput(buildid string, message string) { - cmd := exec.Command("go", "tool", "link", "-B", buildid) + cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o") out, err := cmd.CombinedOutput() if err == nil { log.Fatalf("expected cmd/link to fail") @@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) { } if !strings.Contains(firstLine, message) { - log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine) + log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine) } } From 6ba3a8a6ba5214ec88b83e39148de8cd540a6e94 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Wed, 23 Oct 2024 16:01:08 -0700 Subject: [PATCH 16/20] [release-branch.go1.23] internal/poll: keep copying after successful Sendfile return on BSD The BSD implementation of poll.SendFile incorrectly halted copying after successfully writing one full chunk of data. Adjust the copy loop to match the Linux and Solaris implementations. In testing, empirically macOS appears to sometimes return EAGAIN from sendfile after successfully copying a full chunk.
Add a check to all implementations to return nil after successfully copying all data if the last sendfile call returns EAGAIN. For #70000 For #70020 Change-Id: I57ba649491fc078c7330310b23e1cfd85135c8ff Reviewed-on: https://go-review.googlesource.com/c/go/+/622235 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor (cherry picked from commit bd388c0216bcb33d7325b0ad9722a3be8155a289) Reviewed-on: https://go-review.googlesource.com/c/go/+/622696 --- src/internal/poll/sendfile_bsd.go | 19 ++-- src/internal/poll/sendfile_linux.go | 3 + src/internal/poll/sendfile_solaris.go | 3 + src/os/copy_test.go | 154 ++++++++++++++++++++++++++ src/os/readfrom_linux_test.go | 41 ------- 5 files changed, 171 insertions(+), 49 deletions(-) create mode 100644 src/os/copy_test.go diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 669df94cc12e0d..0b0966815deedd 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -38,22 +38,25 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, pos += int64(n) written += int64(n) remain -= int64(n) + continue + } else if err != syscall.EAGAIN && err != syscall.EINTR { + // This includes syscall.ENOSYS (no kernel + // support) and syscall.EINVAL (fd types which + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. + break } if err == syscall.EINTR { continue } - // This includes syscall.ENOSYS (no kernel - // support) and syscall.EINVAL (fd types which - // don't implement sendfile), and other errors. - // We should end the loop when there is no error - // returned from sendfile(2) or it is not a retryable error. 
- if err != syscall.EAGAIN { - break - } if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index d1c4d5c0d3d34d..1c4130d45da89c 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index ec675833a225dc..b7c3f81a1efdcd 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, break } } + if err == syscall.EAGAIN { + err = nil + } handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) return } diff --git a/src/os/copy_test.go b/src/os/copy_test.go new file mode 100644 index 00000000000000..82346ca4e57e3e --- /dev/null +++ b/src/os/copy_test.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "errors" + "io" + "math/rand/v2" + "net" + "os" + "runtime" + "sync" + "testing" + + "golang.org/x/net/nettest" +) + +// Exercise sendfile/splice fast paths with a moderately large file. 
+// +// https://go.dev/issue/70000 + +func TestLargeCopyViaNetwork(t *testing.T) { + const size = 10 * 1024 * 1024 + dir := t.TempDir() + + src, err := os.Create(dir + "/src") + if err != nil { + t.Fatal(err) + } + defer src.Close() + if _, err := io.CopyN(src, newRandReader(), size); err != nil { + t.Fatal(err) + } + if _, err := src.Seek(0, 0); err != nil { + t.Fatal(err) + } + + dst, err := os.Create(dir + "/dst") + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + client, server := createSocketPair(t, "tcp") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + if n, err := io.Copy(dst, server); n != size || err != nil { + t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size) + } + }() + go func() { + defer wg.Done() + defer client.Close() + if n, err := io.Copy(client, src); n != size || err != nil { + t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size) + } + }() + wg.Wait() + + if _, err := dst.Seek(0, 0); err != nil { + t.Fatal(err) + } + if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil { + t.Fatal(err) + } +} + +func compareReaders(a, b io.Reader) error { + bufa := make([]byte, 4096) + bufb := make([]byte, 4096) + for { + na, erra := io.ReadFull(a, bufa) + if erra != nil && erra != io.EOF { + return erra + } + nb, errb := io.ReadFull(b, bufb) + if errb != nil && errb != io.EOF { + return errb + } + if !bytes.Equal(bufa[:na], bufb[:nb]) { + return errors.New("contents mismatch") + } + if erra == io.EOF && errb == io.EOF { + break + } + } + return nil +} + +type randReader struct { + rand *rand.Rand +} + +func newRandReader() *randReader { + return &randReader{rand.New(rand.NewPCG(0, 0))} +} + +func (r *randReader) Read(p []byte) (int, error) { + var v uint64 + var n int + for i := range p { + if n == 0 { + v = r.rand.Uint64() + n = 8 + } + p[i] = byte(v & 0xff) + v >>= 8 + n-- + } + return len(p), nil +} + +func createSocketPair(t *testing.T, proto string) (client, server 
net.Conn) { + t.Helper() + if !nettest.TestableNetwork(proto) { + t.Skipf("%s does not support %q", runtime.GOOS, proto) + } + + ln, err := nettest.NewLocalListener(proto) + if err != nil { + t.Fatalf("NewLocalListener error: %v", err) + } + t.Cleanup(func() { + if ln != nil { + ln.Close() + } + if client != nil { + client.Close() + } + if server != nil { + server.Close() + } + }) + ch := make(chan struct{}) + go func() { + var err error + server, err = ln.Accept() + if err != nil { + t.Errorf("Accept new connection error: %v", err) + } + ch <- struct{}{} + }() + client, err = net.Dial(proto, ln.Addr().String()) + <-ch + if err != nil { + t.Fatalf("Dial new connection error: %v", err) + } + return client, server +} diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go index 8dcb9cb2172882..45867477dc26b2 100644 --- a/src/os/readfrom_linux_test.go +++ b/src/os/readfrom_linux_test.go @@ -14,15 +14,12 @@ import ( "net" . "os" "path/filepath" - "runtime" "strconv" "strings" "sync" "syscall" "testing" "time" - - "golang.org/x/net/nettest" ) func TestCopyFileRange(t *testing.T) { @@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) { t.Fatalf("server Control error: %v", err) } } - -func createSocketPair(t *testing.T, proto string) (client, server net.Conn) { - t.Helper() - if !nettest.TestableNetwork(proto) { - t.Skipf("%s does not support %q", runtime.GOOS, proto) - } - - ln, err := nettest.NewLocalListener(proto) - if err != nil { - t.Fatalf("NewLocalListener error: %v", err) - } - t.Cleanup(func() { - if ln != nil { - ln.Close() - } - if client != nil { - client.Close() - } - if server != nil { - server.Close() - } - }) - ch := make(chan struct{}) - go func() { - var err error - server, err = ln.Accept() - if err != nil { - t.Errorf("Accept new connection error: %v", err) - } - ch <- struct{}{} - }() - client, err = net.Dial(proto, ln.Addr().String()) - <-ch - if err != nil { - t.Fatalf("Dial new connection error: %v", 
err) - } - return client, server -} From 958f3a0309855bc2e362e2951c70849ebec76f30 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Thu, 24 Oct 2024 13:10:54 +0800 Subject: [PATCH 17/20] [release-branch.go1.23] internal/poll: handle the special case of sendfile(2) sending the full chunk CL 622235 would fix #70000 while resulting in one extra sendfile(2) system call when sendfile(2) returns (>0, EAGAIN). That's also why I left sendfile_bsd.go behind, and didn't make it line up with other two implementations: sendfile_linux.go and sendfile_solaris.go. Unlike sendfile(2)'s on Linux and Solaris that always return (0, EAGAIN), sendfile(2)'s on *BSD and macOS may return (>0, EAGAIN) when using a socket marked for non-blocking I/O. In that case, the current code will try to re-call sendfile(2) immediately, which will most likely get us a (0, EAGAIN). After that, it goes to `dstFD.pd.waitWrite(dstFD.isFile)` below, which should have been done in the first place. Thus, the real problem that leads to #70000 is that the old code doesn't handle the special case of sendfile(2) sending the exact number of bytes the caller requested. 
Fixes #70000 Fixes #70020 Change-Id: I6073d6b9feb58b3d7e114ec21e4e80d9727bca66 Reviewed-on: https://go-review.googlesource.com/c/go/+/622255 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ian Lance Taylor TryBot-Result: Gopher Robot Reviewed-by: Damien Neil Run-TryBot: Andy Pan Reviewed-on: https://go-review.googlesource.com/c/go/+/622697 --- src/internal/poll/sendfile_bsd.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 0b0966815deedd..341e07ca1fed6a 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -32,13 +32,28 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if int64(n) > remain { n = int(remain) } + m := n pos1 := pos n, err = syscall.Sendfile(dst, src, &pos1, n) if n > 0 { pos += int64(n) written += int64(n) remain -= int64(n) - continue + // (n, nil) indicates that sendfile(2) has transferred + // the exact number of bytes we requested, or some unretryable + // error have occurred with partial bytes sent. Either way, we + // don't need to go through the following logic to check EINTR + // or fell into dstFD.pd.waitWrite, just continue to send the + // next chunk or break the loop. + if n == m { + continue + } else if err != syscall.EAGAIN && + err != syscall.EINTR && + err != syscall.EBUSY { + // Particularly, EPIPE. Errors like that would normally lead + // the subsequent sendfile(2) call to (-1, EBADF). 
+ break + } } else if err != syscall.EAGAIN && err != syscall.EINTR { // This includes syscall.ENOSYS (no kernel // support) and syscall.EINVAL (fd types which From a0d15cb9c8f3c35c96129857984d25446041f29e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisend=C3=B6rfer?= Date: Sat, 7 Sep 2024 13:44:09 +0200 Subject: [PATCH 18/20] [release-branch.go1.23] runtime: fix MutexProfile missing root frames Fix a regression introduced in CL 598515 causing runtime.MutexProfile stack traces to omit their root frames. In most cases this was merely causing the `runtime.goexit` frame to go missing. But in the case of runtime._LostContendedRuntimeLock, an empty stack trace was being produced. Add a test that catches this regression by checking for a stack trace with the `runtime.goexit` frame. Also fix a separate problem in expandFrame that could cause out-of-bounds panics when profstackdepth is set to a value below 32. There is no test for this fix because profstackdepth can't be changed at runtime right now. 
Fixes #69865 Change-Id: I1600fe62548ea84981df0916d25072c3ddf1ea1a Reviewed-on: https://go-review.googlesource.com/c/go/+/611615 Reviewed-by: David Chase Reviewed-by: Nick Ripley Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI (cherry picked from commit c64ca8c6ef13723b9f25f4b5e1c7b6986b958d2e) Reviewed-on: https://go-review.googlesource.com/c/go/+/621276 Reviewed-by: Cherry Mui --- src/runtime/mprof.go | 3 ++- src/runtime/pprof/mprof_test.go | 2 +- src/runtime/pprof/pprof_test.go | 46 ++++++++++++++++++++++++++++++--- 3 files changed, 45 insertions(+), 6 deletions(-) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 82b7fa68aecbde..ee3e59a9aa99ce 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) { for i := range p { cf := CallersFrames(p[i].Stack()) j := 0 - for ; j < len(expandedStack); j++ { + for j < len(expandedStack) { f, more := cf.Next() // f.PC is a "call PC", but later consumers will expect // "return PCs" expandedStack[j] = f.PC + 1 + j++ if !more { break } diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 391588d4acd0ec..ef373b36848437 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p) diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 41952ff147c4b7..da4ad17d77e6fd 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -982,7 +982,7 @@ func TestBlockProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, test := range tests { if !containsStack(stks, test.stk) { t.Errorf("No matching stack entry for 
%v, want %+v", test.name, test.stk) @@ -992,7 +992,7 @@ func TestBlockProfile(t *testing.T) { } -func stacks(p *profile.Profile) (res [][]string) { +func profileStacks(p *profile.Profile) (res [][]string) { for _, s := range p.Sample { var stk []string for _, l := range s.Location { @@ -1005,6 +1005,22 @@ func stacks(p *profile.Profile) (res [][]string) { return res } +func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) { + for _, record := range records { + frames := runtime.CallersFrames(record.Stack()) + var stk []string + for { + frame, more := frames.Next() + stk = append(stk, frame.Function) + if !more { + break + } + } + res = append(res, stk) + } + return res +} + func containsStack(got [][]string, want []string) bool { for _, stk := range got { if len(stk) < len(want) { @@ -1289,7 +1305,7 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("invalid profile: %v", err) } - stks := stacks(p) + stks := profileStacks(p) for _, want := range [][]string{ {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"}, } { @@ -1329,6 +1345,28 @@ func TestMutexProfile(t *testing.T) { t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D) } }) + + t.Run("records", func(t *testing.T) { + // Record a mutex profile using the structured record API. + var records []runtime.BlockProfileRecord + for { + n, ok := runtime.MutexProfile(records) + if ok { + records = records[:n] + break + } + records = make([]runtime.BlockProfileRecord, n*2) + } + + // Check that we see the same stack trace as the proto profile. For + // historical reason we expect a runtime.goexit root frame here that is + // omitted in the proto profile. 
+ stks := blockRecordStacks(records) + want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"} + if !containsStack(stks, want) { + t.Errorf("No matching stack entry for %+v", want) + } + }) } func TestMutexProfileRateAdjust(t *testing.T) { @@ -2514,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) { } t.Logf("Profile = %v", p) - stks := stacks(p) + stks := profileStacks(p) var stk []string for _, s := range stks { if hasPrefix(s, test.prefix) { From 1207de4f6c3739eb4339ff9eb5a794e9bdd7c4d2 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Tue, 8 Oct 2024 18:10:17 +0200 Subject: [PATCH 19/20] [release-branch.go1.23] runtime: reduce syscall.SyscallX stack usage syscall.SyscallX consumes a lot of stack space, which is a problem because they are nosplit functions. They used to use less stack space, but CL 563315, that landed in Go 1.23, increased the stack usage by a lot. This CL reduces the stack usage back to the previous level. Fixes #69848 Updates #69813 Change-Id: Iddedd28b693c66a258da687389768055c493fc2e Reviewed-on: https://go-review.googlesource.com/c/go/+/618497 Reviewed-by: Cherry Mui Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI (cherry picked from commit fa7343aca326aad061ab877c1a4cebb96c4355c1) Reviewed-on: https://go-review.googlesource.com/c/go/+/623516 Reviewed-by: Dmitri Shuralyov Reviewed-by: Michael Pratt --- src/runtime/syscall_windows.go | 30 +++++++++++++++-------------- src/runtime/syscall_windows_test.go | 7 +++++++ 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 69d720a395c48d..85b1b8c9024a73 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -454,43 +454,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint //go:linkname syscall_Syscall syscall.Syscall //go:nosplit func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - args 
:= [...]uintptr{a1, a2, a3} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3) } //go:linkname syscall_Syscall6 syscall.Syscall6 //go:nosplit func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6) } //go:linkname syscall_Syscall9 syscall.Syscall9 //go:nosplit func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9) } //go:linkname syscall_Syscall12 syscall.Syscall12 //go:nosplit func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) } //go:linkname syscall_Syscall15 syscall.Syscall15 //go:nosplit func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} - return syscall_SyscallN(fn, args[:nargs]...) + return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) } //go:linkname syscall_Syscall18 syscall.Syscall18 //go:nosplit func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { - args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} - return syscall_SyscallN(fn, args[:nargs]...) 
+ return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) } // maxArgs should be divisible by 2, as Windows stack @@ -503,7 +497,15 @@ const maxArgs = 42 //go:linkname syscall_SyscallN syscall.SyscallN //go:nosplit func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { - if len(args) > maxArgs { + return syscall_syscalln(fn, uintptr(len(args)), args...) +} + +//go:nosplit +func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) { + if n > uintptr(len(args)) { + panic("syscall: n > len(args)") // should not be reachable from user code + } + if n > maxArgs { panic("runtime: SyscallN has too many arguments") } @@ -512,7 +514,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { // calls back into Go. c := &getg().m.winsyscall c.fn = fn - c.n = uintptr(len(args)) + c.n = n if c.n != 0 { c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) } diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index 6a056c8d2b190c..156cf3eb8e5c71 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -1212,6 +1212,13 @@ func TestBigStackCallbackSyscall(t *testing.T) { } } +func TestSyscallStackUsage(t *testing.T) { + // Test that the stack usage of a syscall doesn't exceed the limit. + // See https://go.dev/issue/69813. 
+ syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +} + var ( modwinmm = syscall.NewLazyDLL("winmm.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll") From c390a1c22e8951263e6c01346a4281d604b25062 Mon Sep 17 00:00:00 2001 From: Gopher Robot Date: Wed, 6 Nov 2024 22:21:42 +0000 Subject: [PATCH 20/20] [release-branch.go1.23] go1.23.3 Change-Id: I065005a4a18f801d09ad3ebc886e90a6dd1df69a Reviewed-on: https://go-review.googlesource.com/c/go/+/626137 Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Auto-Submit: Gopher Robot Reviewed-by: David Chase --- VERSION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/VERSION b/VERSION index 7c5b4094049322..66e6565be501e5 100644 --- a/VERSION +++ b/VERSION @@ -1,2 +1,2 @@ -go1.23.2 -time 2024-09-28T01:34:15Z +go1.23.3 +time 2024-11-06T18:46:45Z