// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code; however, it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in the near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and then we would need
//    to park it. Also, it would destroy locality of computation, as we want to
//    preserve dependent goroutines on the same thread; and it would introduce
//    additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if there is an
// idle P and there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// the spinning->non-spinning thread transition. This transition can race with the
// submission of a new goroutine, and one side or the other needs to unpark another
// worker thread. If both fail to do so, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to the local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for the spinning->non-spinning transition is: decrement
// nmspinning, #StoreLoad-style memory barrier, check all per-P work queues for new
// work. Note that all this complexity does not apply to the global run queue, as we
// are not sloppy about thread unparking when submitting to the global queue. Also
// see comments for nmspinning manipulation. A sketch of both patterns follows below.
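
// A hedged, illustrative sketch of the two racing patterns described above. This
// is not the literal runtime code (the real logic lives in places like runqput,
// wakep, findrunnable and resetspinning, and details differ by version):
//
//	// Readying a goroutine:
//	//	runqput(p, gp, false)                     // publish the work
//	//	<#StoreLoad barrier, e.g. an atomic RMW>  // order publish before the check
//	//	if sched.npidle > 0 && sched.nmspinning == 0 {
//	//		wakep()                           // unpark a spinning M
//	//	}
//	//
//	// Spinning -> non-spinning:
//	//	atomic.Xadd(&sched.nmspinning, -1)        // leave the spinning state
//	//	<#StoreLoad barrier, provided by the Xadd>
//	//	// Recheck all per-P run queues; if work is found, re-enter the spinning
//	//	// state (or unpark another spinning M) instead of parking.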

var (
	m0           m
	g0           g
	raceprocctx0 uintptr
)

//go:linkname runtime_init runtime.init
func runtime_init()

//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Allow newproc to start new Ms.
	mainStarted = true

	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
		systemstack(func() {
			newm(sysmon, nil)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer
	if nanotime() == 0 {
		throw("nanotime returning zero")
	}

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started.
	runtimeInitTime = nanotime()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}
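
// A minimal user-level illustration (not part of the runtime; the program below
// is an assumption made for illustration only) of why main waits on the
// runningPanicDefers/panicking counters above:
//
//	package main
//
//	func main() {
//		go func() { panic("boom") }() // may race with main returning
//	}
//
// Without the checks above, the process could reach exit(0) before the other
// goroutine finishes printing its panic trace; with them, the trace gets printed
// and the process exits via the panic path instead (see issues 3934 and 20018).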

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
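
// An illustrative sketch of the sysmon side that resumes the helper parked above.
// This is a paraphrase, not the literal sysmon code, and injectglist's exact
// signature varies between runtime versions:
//
//	// inside sysmon's periodic loop:
//	if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
//		lock(&forcegc.lock)
//		forcegc.idle = 0
//		forcegc.g.schedlink = 0
//		injectglist(forcegc.g) // make forcegchelper runnable again
//		unlock(&forcegc.lock)
//	}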

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
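
// A minimal user-level example (not part of the runtime, shown only as an
// illustration) of the cooperative yield that Gosched provides:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		go fmt.Println("from the goroutine")
//		runtime.Gosched() // yield, so the new goroutine likely runs before main continues
//		fmt.Println("from main")
//	}
//
// Gosched only yields: unlike gopark, the calling goroutine stays runnable and is
// rescheduled without anyone having to ready it.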

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
// Reason explains why the goroutine has been parked.
// It is displayed in stack traces and heap dumps.
// Reasons should be unique and descriptive.
// Do not re-use reasons; add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
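
// A hedged sketch of the typical wait/notify pairing built on the two helpers
// above (illustrative only; waitReasonSomething and sleepingG are placeholders,
// and real users such as forcegchelper or the semaphore code add their own
// bookkeeping):
//
//	// Waiter:
//	lock(&l)
//	for !condition {
//		sleepingG = getg() // record ourselves so the notifier can find us
//		// parkunlock_c releases l only after this G is marked waiting,
//		// closing the window where a notifier could miss the sleeper.
//		goparkunlock(&l, waitReasonSomething, traceEvGoBlock, 1)
//		lock(&l)
//	}
//	unlock(&l)
//
//	// Notifier, on another goroutine:
//	lock(&l)
//	condition = true
//	gp := sleepingG
//	unlock(&l)
//	goready(gp, 1)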

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// CAREFUL: In programs with plugins, funcPC can return different values
// for the same function (because there are actually multiple copies of
// the same function in the address space). To be safe, don't use the
// results of this function in any == expression. It is only safe to
// use the result as an address at which to start executing code.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}
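
// How the double dereference above works, as a hedged sketch (the layout walk-
// through is an editorial assumption for illustration, not a guarantee):
//
//	// f is an interface value: a (type word, data word) pair. add(&f, PtrSize)
//	// skips the type word and points at the data word, which holds the func
//	// value, i.e. a pointer to a funcval whose first word is the entry PC.
//	//
//	//	&f -> [type word | data word] -> funcval{fn, ...} -> entry PC
//	//
//	// Typical usage hands the result to code that needs a raw PC, e.g.
//	//	pc := funcPC(sysmon) // hypothetical example call, not from this file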

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

var badmorestackg0Msg = "fatal: morestack on g0\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	sp := stringStructOf(&badmorestackg0Msg)
	write(2, sp.str, int32(sp.len))
}

var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	sp := stringStructOf(&badmorestackgsignalMsg)
	write(2, sp.str, int32(sp.len))
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)
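
// A condensed, hedged sketch of how newproc1 consumes this batch (paraphrased
// from the goroutine-allocation path; the exact code may differ by version):
//
//	if _p_.goidcache == _p_.goidcacheend {
//		// Refill: reserve _GoidCacheBatch ids with one atomic add on the
//		// global generator instead of one atomic op per new goroutine.
//		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
//		_p_.goidcache -= _GoidCacheBatch - 1
//		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
//	}
//	newg.goid = int64(_p_.goidcache)
//	_p_.goidcache++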

// cpuinit extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and calls internal/cpu.Initialize.
func cpuinit() {
	const prefix = "GODEBUG="
	var env string

	switch GOOS {
	case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "solaris", "linux":
		cpu.DebugOptions = true

		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))

			if hasPrefix(s, prefix) {
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}

	cpu.Initialize(env)

	// CPU feature support variables are used in code generated by the compiler
	// to guard execution of instructions that cannot be assumed to be always supported.
	x86HasPOPCNT = cpu.X86.HasPOPCNT
	x86HasSSE41 = cpu.X86.HasSSE41

	arm64HasATOMICS = cpu.ARM64.HasATOMICS
}
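
// A hedged illustration of how such a variable is used: for something like
// math/bits.OnesCount64 on amd64, the compiler conceptually emits a guarded
// branch (the generated shape shown here is an assumption, not a quote):
//
//	if x86HasPOPCNT {
//		// single POPCNT instruction
//	} else {
//		// portable table/shift fallback
//	}
//
// Keeping the flags in plain bool globals lets that guard be a simple load+branch.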

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000

	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m)
	cpuinit()       // must run before alginit
	alginit()       // maps must not be used before this call
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules

	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes. We can't do this until after
	// procresize because the write barrier needs a P.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
		for _, p := range allp {
			p.wbBuf.reset()
		}
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}
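
// As a concrete example of the GOMAXPROCS handling above: on an 8-CPU machine,
// procs starts at 8 (ncpu); running the program as
//
//	GOMAXPROCS=2 ./prog
//
// (./prog being a hypothetical binary name) makes atoi32 return 2, so
// procresize(2) sets up exactly two Ps. The value can still be changed later
// from user code via runtime.GOMAXPROCS.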

func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if mcount() > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)
	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	mp.id = sched.mnext
	sched.mnext++
	checkmcount()

	mp.fastrand[0] = 1597334677 * uint32(mp.id)
	mp.fastrand[1] = uint32(cputicks())
	if mp.fastrand[0]|mp.fastrand[1] == 0 {
		mp.fastrand[1] = 1
	}

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, next)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing uint32

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	atomic.Store(&freezing, 1)
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gcscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gcscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gcscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}
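
// A hedged sketch of the acquire/release pairing described above, roughly as
// the GC's stack-scanning code uses it (paraphrased, not the literal routine;
// scanSomething is a placeholder for the real scan work):
//
//	if castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
//		// gp's status is now "locked": it cannot start running while we
//		// inspect it.
//		scanSomething(gp)
//		casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
//	}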

// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		systemstack(func() {
			print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
			throw("casgstatus")
		})
	}

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			throw("casgstatus: waiting for Gwaiting but is Grunnable")
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
		// But meanwhile just yield.
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// casg