diff --git a/cmd/geth/attach_test.go b/cmd/geth/attach_test.go
index 7c5f95175..91007ccf6 100644
--- a/cmd/geth/attach_test.go
+++ b/cmd/geth/attach_test.go
@@ -61,7 +61,7 @@ func TestRemoteDbWithHeaders(t *testing.T) {
 }
 
 func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) {
-	var ok uint32
+	var ok atomic.Uint32
 	server := &http.Server{
 		Addr:    "localhost:0",
 		Handler: &testHandler{func(w http.ResponseWriter, r *http.Request) {
@@ -72,12 +72,12 @@ func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) {
 		if have, want := r.Header.Get("second"), "two"; have != want {
 			t.Fatalf("missing header, have %v want %v", have, want)
 		}
-		atomic.StoreUint32(&ok, 1)
+		ok.Store(1)
 	}}}
 	go server.Serve(ln)
 	defer server.Close()
 	runGeth(t, gethArgs...).WaitExit()
-	if atomic.LoadUint32(&ok) != 1 {
+	if ok.Load() != 1 {
 		t.Fatal("Test fail, expected invocation to succeed")
 	}
 }
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index b27ce5571..41591ac13 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -261,16 +261,16 @@ func importChain(ctx *cli.Context) error {
 	defer db.Close()
 
 	// Start periodically gathering memory profiles
-	var peakMemAlloc, peakMemSys uint64
+	var peakMemAlloc, peakMemSys atomic.Uint64
 	go func() {
 		stats := new(runtime.MemStats)
 		for {
 			runtime.ReadMemStats(stats)
-			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
-				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
+			if peakMemAlloc.Load() < stats.Alloc {
+				peakMemAlloc.Store(stats.Alloc)
 			}
-			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
-				atomic.StoreUint64(&peakMemSys, stats.Sys)
+			if peakMemSys.Load() < stats.Sys {
+				peakMemSys.Store(stats.Sys)
 			}
 			time.Sleep(5 * time.Second)
 		}
@@ -303,8 +303,8 @@ func importChain(ctx *cli.Context) error {
 	mem := new(runtime.MemStats)
 	runtime.ReadMemStats(mem)
 
-	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
-	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
+	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
+	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
 	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
 	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
 
diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go
index 746f12c76..b36c3265a 100644
--- a/cmd/geth/les_test.go
+++ b/cmd/geth/les_test.go
@@ -107,10 +107,10 @@ func ipcEndpoint(ipcPath, datadir string) string {
 // but windows require pipes to sit in "\\.\pipe\". Therefore, to run several
 // nodes simultaneously, we need to distinguish between them, which we do by
 // the pipe filename instead of folder.
-var nextIPC = uint32(0)
+var nextIPC atomic.Uint32
 
 func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
-	ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
+	ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
 	args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
 
 	t.Logf("Starting %v with rpc: %v", name, args)
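
Note on the pattern above (not part of the patch): every hunk applies the same mechanical migration, from the package-level `sync/atomic` functions operating on plain integers to the struct-based atomic types (`atomic.Uint32`, `atomic.Uint64`) introduced in Go 1.19. A minimal standalone sketch contrasting the two styles follows; the counter names are illustrative only:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// Old style: a plain integer manipulated via package-level helpers.
	// Nothing stops an accidental non-atomic access like oldCounter++.
	var oldCounter uint32
	atomic.AddUint32(&oldCounter, 1)
	fmt.Println(atomic.LoadUint32(&oldCounter)) // 1

	// New style (Go 1.19+): the type only exposes atomic methods,
	// so every access is atomic by construction.
	var newCounter atomic.Uint32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			newCounter.Add(1)
		}()
	}
	wg.Wait()
	fmt.Println(newCounter.Load()) // always 10
}
```

Besides reading more cleanly, the typed form turns a non-atomic access such as `newCounter++` into a compile error rather than a latent data race, and the 64-bit variants (`atomic.Int64`, `atomic.Uint64`) are automatically 64-bit aligned even on 32-bit platforms, an obligation the function-based API left to the caller.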