Diffstat (limited to 'src/pkg/testing')
-rw-r--r--  src/pkg/testing/allocs.go     |  41
-rw-r--r--  src/pkg/testing/benchmark.go  |  84
-rw-r--r--  src/pkg/testing/example.go    |  80
-rw-r--r--  src/pkg/testing/testing.go    | 212
4 files changed, 313 insertions, 104 deletions
diff --git a/src/pkg/testing/allocs.go b/src/pkg/testing/allocs.go
new file mode 100644
index 000000000..d142a330b
--- /dev/null
+++ b/src/pkg/testing/allocs.go
@@ -0,0 +1,41 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "runtime"
+)
+
+// AllocsPerRun returns the average number of allocations during calls to f.
+//
+// To compute the number of allocations, the function will first be run once as
+// a warm-up. The average number of allocations over the specified number of
+// runs will then be measured and returned.
+//
+// AllocsPerRun sets GOMAXPROCS to 1 during its measurement and will restore
+// it before returning.
+func AllocsPerRun(runs int, f func()) (avg float64) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+
+ // Warm up the function
+ f()
+
+ // Measure the starting statistics
+ var memstats runtime.MemStats
+ runtime.ReadMemStats(&memstats)
+ mallocs := 0 - memstats.Mallocs
+
+ // Run the function the specified number of times
+ for i := 0; i < runs; i++ {
+ f()
+ }
+
+ // Read the final statistics
+ runtime.ReadMemStats(&memstats)
+ mallocs += memstats.Mallocs
+
+ // Average the mallocs over the runs (not counting the warm-up)
+ return float64(mallocs) / float64(runs)
+}
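
For reference, a minimal sketch of how the new AllocsPerRun helper might be driven from an ordinary test; the package name, test name, and measured operation below are hypothetical and not part of this change:

	package foo_test // hypothetical

	import (
		"fmt"
		"testing"
	)

	func TestSprintfAllocs(t *testing.T) {
		// AllocsPerRun runs the closure once as a warm-up, then reports the
		// average number of mallocs over 100 measured runs, with GOMAXPROCS
		// pinned to 1 for the duration of the measurement.
		avg := testing.AllocsPerRun(100, func() {
			_ = fmt.Sprintf("%d", 42)
		})
		t.Logf("allocs/run: %v", avg) // or enforce a budget with t.Errorf
	}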
diff --git a/src/pkg/testing/benchmark.go b/src/pkg/testing/benchmark.go
index 41290594e..25fb2d619 100644
--- a/src/pkg/testing/benchmark.go
+++ b/src/pkg/testing/benchmark.go
@@ -9,11 +9,19 @@ import (
"fmt"
"os"
"runtime"
+ "sync"
"time"
)
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
-var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
+var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
+
+// Global lock to ensure only one benchmark runs at a time.
+var benchmarkLock sync.Mutex
+
+// Used for every benchmark for measuring memory.
+var memStats runtime.MemStats
// An internal type but exported because it is cross-package; part of the implementation
// of the "go test" command.
@@ -26,11 +34,18 @@ type InternalBenchmark struct {
// timing and to specify the number of iterations to run.
type B struct {
common
- N int
- benchmark InternalBenchmark
- bytes int64
- timerOn bool
- result BenchmarkResult
+ N int
+ benchmark InternalBenchmark
+ bytes int64
+ timerOn bool
+ showAllocResult bool
+ result BenchmarkResult
+ // The initial states of memStats.Mallocs and memStats.TotalAlloc.
+ startAllocs uint64
+ startBytes uint64
+ // The net total of this test after being run.
+ netAllocs uint64
+ netBytes uint64
}
// StartTimer starts timing a test. This function is called automatically
@@ -38,6 +53,9 @@ type B struct {
// a call to StopTimer.
func (b *B) StartTimer() {
if !b.timerOn {
+ runtime.ReadMemStats(&memStats)
+ b.startAllocs = memStats.Mallocs
+ b.startBytes = memStats.TotalAlloc
b.start = time.Now()
b.timerOn = true
}
@@ -49,6 +67,9 @@ func (b *B) StartTimer() {
func (b *B) StopTimer() {
if b.timerOn {
b.duration += time.Now().Sub(b.start)
+ runtime.ReadMemStats(&memStats)
+ b.netAllocs += memStats.Mallocs - b.startAllocs
+ b.netBytes += memStats.TotalAlloc - b.startBytes
b.timerOn = false
}
}
@@ -57,15 +78,27 @@ func (b *B) StopTimer() {
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
if b.timerOn {
+ runtime.ReadMemStats(&memStats)
+ b.startAllocs = memStats.Mallocs
+ b.startBytes = memStats.TotalAlloc
b.start = time.Now()
}
b.duration = 0
+ b.netAllocs = 0
+ b.netBytes = 0
}
// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
func (b *B) SetBytes(n int64) { b.bytes = n }
+// ReportAllocs enables malloc statistics for this benchmark.
+// It is equivalent to setting -test.benchmem, but it only affects the
+// benchmark function that calls ReportAllocs.
+func (b *B) ReportAllocs() {
+ b.showAllocResult = true
+}
+
func (b *B) nsPerOp() int64 {
if b.N <= 0 {
return 0
@@ -75,6 +108,8 @@ func (b *B) nsPerOp() int64 {
// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
+ benchmarkLock.Lock()
+ defer benchmarkLock.Unlock()
// Try to get a comparable environment for each run
// by clearing garbage from previous runs.
runtime.GC()
@@ -151,7 +186,7 @@ func (b *B) launch() {
b.runN(n)
// Run the benchmark for at least the specified amount of time.
- d := time.Duration(*benchTime * float64(time.Second))
+ d := *benchTime
for !b.failed && b.duration < d && n < 1e9 {
last := n
// Predict iterations/sec.
@@ -168,14 +203,16 @@ func (b *B) launch() {
n = roundUp(n)
b.runN(n)
}
- b.result = BenchmarkResult{b.N, b.duration, b.bytes}
+ b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
}
// The results of a benchmark run.
type BenchmarkResult struct {
- N int // The number of iterations.
- T time.Duration // The total time taken.
- Bytes int64 // Bytes processed in one iteration.
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ MemAllocs uint64 // The total number of memory allocations.
+ MemBytes uint64 // The total number of bytes allocated.
}
func (r BenchmarkResult) NsPerOp() int64 {
@@ -192,6 +229,20 @@ func (r BenchmarkResult) mbPerSec() float64 {
return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
}
+func (r BenchmarkResult) AllocsPerOp() int64 {
+ if r.N <= 0 {
+ return 0
+ }
+ return int64(r.MemAllocs) / int64(r.N)
+}
+
+func (r BenchmarkResult) AllocedBytesPerOp() int64 {
+ if r.N <= 0 {
+ return 0
+ }
+ return int64(r.MemBytes) / int64(r.N)
+}
+
func (r BenchmarkResult) String() string {
mbs := r.mbPerSec()
mb := ""
@@ -212,6 +263,11 @@ func (r BenchmarkResult) String() string {
return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
}
+func (r BenchmarkResult) MemString() string {
+ return fmt.Sprintf("%8d B/op\t%8d allocs/op",
+ r.AllocedBytesPerOp(), r.AllocsPerOp())
+}
+
// An internal function but exported because it is cross-package; part of the implementation
// of the "go test" command.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
@@ -249,7 +305,11 @@ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks [
fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
continue
}
- fmt.Printf("%v\n", r)
+ results := r.String()
+ if *benchmarkMemory || b.showAllocResult {
+ results += "\t" + r.MemString()
+ }
+ fmt.Println(results)
// Unlike with tests, we ignore the -chatty flag and always print output for
// benchmarks since the output generation time will skew the results.
if len(b.output) > 0 {
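
A sketch of a benchmark that opts in to the new allocation columns on its own, independent of the -test.benchmem flag (the package, benchmark name, and body are hypothetical). When either the flag or the call is in effect, the B/op and allocs/op columns produced by MemString are appended to the usual ns/op line:

	package foo // hypothetical

	import (
		"strconv"
		"testing"
	)

	func BenchmarkFormatInt(b *testing.B) {
		b.ReportAllocs() // same effect as -test.benchmem, but only for this benchmark
		for i := 0; i < b.N; i++ {
			_ = strconv.FormatInt(int64(i), 10)
		}
	}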
diff --git a/src/pkg/testing/example.go b/src/pkg/testing/example.go
index 671c79876..828c2d3ed 100644
--- a/src/pkg/testing/example.go
+++ b/src/pkg/testing/example.go
@@ -24,8 +24,6 @@ func RunExamples(matchString func(pat, str string) (bool, error), examples []Int
var eg InternalExample
- stdout, stderr := os.Stdout, os.Stderr
-
for _, eg = range examples {
matched, err := matchString(*match, eg.Name)
if err != nil {
@@ -35,48 +33,68 @@ func RunExamples(matchString func(pat, str string) (bool, error), examples []Int
if !matched {
continue
}
- if *chatty {
- fmt.Printf("=== RUN: %s\n", eg.Name)
+ if !runExample(eg) {
+ ok = false
}
+ }
+
+ return
+}
+
+func runExample(eg InternalExample) (ok bool) {
+ if *chatty {
+ fmt.Printf("=== RUN: %s\n", eg.Name)
+ }
- // capture stdout and stderr
- r, w, err := os.Pipe()
+ // Capture stdout.
+ stdout := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Stdout = w
+ outC := make(chan string)
+ go func() {
+ buf := new(bytes.Buffer)
+ _, err := io.Copy(buf, r)
+ r.Close()
if err != nil {
- fmt.Fprintln(os.Stderr, err)
+ fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err)
os.Exit(1)
}
- os.Stdout, os.Stderr = w, w
- outC := make(chan string)
- go func() {
- buf := new(bytes.Buffer)
- _, err := io.Copy(buf, r)
- if err != nil {
- fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
- os.Exit(1)
- }
- outC <- buf.String()
- }()
+ outC <- buf.String()
+ }()
+
+ start := time.Now()
+ ok = true
- // run example
- t0 := time.Now()
- eg.F()
- dt := time.Now().Sub(t0)
+ // Clean up in a deferred call so we can recover if the example panics.
+ defer func() {
+ d := time.Now().Sub(start)
- // close pipe, restore stdout/stderr, get output
+ // Close pipe, restore stdout, get output.
w.Close()
- os.Stdout, os.Stderr = stdout, stderr
+ os.Stdout = stdout
out := <-outC
- // report any errors
- tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
- if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
- fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
- eg.Name, tstr, g, e)
+ var fail string
+ err := recover()
+ if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e && err == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", g, e)
+ }
+ if fail != "" || err != nil {
+ fmt.Printf("--- FAIL: %s (%v)\n%s", eg.Name, d, fail)
ok = false
} else if *chatty {
- fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
+ fmt.Printf("--- PASS: %s (%v)\n", eg.Name, d)
}
- }
+ if err != nil {
+ panic(err)
+ }
+ }()
+ // Run example.
+ eg.F()
return
}
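
Since the rewritten comparison trims leading and trailing space on both sides, and a failure is now reported even when the example panics (the deferred cleanup recovers, prints the FAIL line, restores os.Stdout, and re-panics), an example such as the following hypothetical one still passes despite the stray whitespace:

	package foo // hypothetical

	import "fmt"

	func ExampleGreeting() {
		fmt.Println("  hello  ") // surrounding whitespace is ignored by the comparison
		// Output:
		// hello
	}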
diff --git a/src/pkg/testing/testing.go b/src/pkg/testing/testing.go
index f59ce8ed6..86cd46c29 100644
--- a/src/pkg/testing/testing.go
+++ b/src/pkg/testing/testing.go
@@ -10,10 +10,21 @@
// [a-z]) and serves to identify the test routine.
// These TestXxx routines should be declared within the package they are testing.
//
+// Tests and benchmarks may be skipped if not applicable like this:
+// func TestTimeConsuming(t *testing.T) {
+// if testing.Short() {
+// t.Skip("skipping test in short mode.")
+// }
+// ...
+// }
+//
// Functions of the form
// func BenchmarkXxx(*testing.B)
// are considered benchmarks, and are executed by the "go test" command when
-// the -test.bench flag is provided.
+// the -test.bench flag is provided. Benchmarks are run sequentially.
+//
+// For a description of the testing flags, see
+// http://golang.org/cmd/go/#Description_of_testing_flags.
//
// A sample benchmark function looks like this:
// func BenchmarkHello(b *testing.B) {
@@ -22,26 +33,26 @@
// }
// }
//
+// The benchmark function must run the target code b.N times.
// The benchmark package will vary b.N until the benchmark function lasts
// long enough to be timed reliably. The output
-// testing.BenchmarkHello 10000000 282 ns/op
+// BenchmarkHello 10000000 282 ns/op
// means that the loop ran 10000000 times at a speed of 282 ns per loop.
//
// If a benchmark needs some expensive setup before running, the timer
-// may be stopped:
+// may be reset:
// func BenchmarkBigLen(b *testing.B) {
-// b.StopTimer()
// big := NewBig()
-// b.StartTimer()
+// b.ResetTimer()
// for i := 0; i < b.N; i++ {
// big.Len()
// }
// }
//
// The package also runs and verifies example code. Example functions may
-// include a concluding comment that begins with "Output:" and is compared with
-// the standard output of the function when the tests are run, as in these
-// examples of an example:
+// include a concluding line comment that begins with "Output:" and is compared with
+// the standard output of the function when the tests are run. (The comparison
+// ignores leading and trailing space.) These are examples of an example:
//
// func ExampleHello() {
// fmt.Println("hello")
@@ -79,6 +90,7 @@
package testing
import (
+ "bytes"
"flag"
"fmt"
"os"
@@ -86,6 +98,7 @@ import (
"runtime/pprof"
"strconv"
"strings"
+ "sync"
"time"
)
@@ -98,14 +111,16 @@ var (
short = flag.Bool("test.short", false, "run smaller test suite to save time")
// Report as tests are run; default is silent for success.
- chatty = flag.Bool("test.v", false, "verbose: print additional output")
- match = flag.String("test.run", "", "regular expression to select tests and examples to run")
- memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
- memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
- cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
- timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
- cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
- parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ match = flag.String("test.run", "", "regular expression to select tests and examples to run")
+ memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+ blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to the named file after execution")
+ blockProfileRate = flag.Int("test.blockprofilerate", 1, "if >= 0, calls runtime.SetBlockProfileRate()")
+ timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
haveExamples bool // are there examples?
@@ -115,8 +130,11 @@ var (
// common holds the elements common between T and B and
// captures common methods such as Errorf.
type common struct {
- output []byte // Output generated by test or benchmark.
- failed bool // Test or benchmark has failed.
+ mu sync.RWMutex // guards output and failed
+ output []byte // Output generated by test or benchmark.
+ failed bool // Test or benchmark has failed.
+ skipped bool // Test or benchmark has been skipped.
+
start time.Time // Time test or benchmark started
duration time.Duration
self interface{} // To be sent on signal channel when done.
@@ -128,37 +146,43 @@ func Short() bool {
return *short
}
-// decorate inserts the final newline if needed and indentation tabs for formatting.
-// If addFileLine is true, it also prefixes the string with the file and line of the call site.
-func decorate(s string, addFileLine bool) string {
- if addFileLine {
- _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
- if ok {
- // Truncate file name at last file name separator.
- if index := strings.LastIndex(file, "/"); index >= 0 {
- file = file[index+1:]
- } else if index = strings.LastIndex(file, "\\"); index >= 0 {
- file = file[index+1:]
- }
- } else {
- file = "???"
- line = 1
+// Verbose reports whether the -test.v flag is set.
+func Verbose() bool {
+ return *chatty
+}
+
+// decorate prefixes the string with the file and line of the call site
+// and inserts the final newline if needed and indentation tabs for formatting.
+func decorate(s string) string {
+ _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+ if ok {
+ // Truncate file name at last file name separator.
+ if index := strings.LastIndex(file, "/"); index >= 0 {
+ file = file[index+1:]
+ } else if index = strings.LastIndex(file, "\\"); index >= 0 {
+ file = file[index+1:]
}
- s = fmt.Sprintf("%s:%d: %s", file, line, s)
+ } else {
+ file = "???"
+ line = 1
}
- s = "\t" + s // Every line is indented at least one tab.
- n := len(s)
- if n > 0 && s[n-1] != '\n' {
- s += "\n"
- n++
+ buf := new(bytes.Buffer)
+ // Every line is indented at least one tab.
+ buf.WriteByte('\t')
+ fmt.Fprintf(buf, "%s:%d: ", file, line)
+ lines := strings.Split(s, "\n")
+ if l := len(lines); l > 1 && lines[l-1] == "" {
+ lines = lines[:l-1]
}
- for i := 0; i < n-1; i++ { // -1 to avoid final newline
- if s[i] == '\n' {
+ for i, line := range lines {
+ if i > 0 {
// Second and subsequent lines are indented an extra tab.
- return s[0:i+1] + "\t" + decorate(s[i+1:n], false)
+ buf.WriteString("\n\t\t")
}
+ buf.WriteString(line)
}
- return s
+ buf.WriteByte('\n')
+ return buf.String()
}
// T is a type passed to Test functions to manage test state and support formatted test logs.
@@ -170,13 +194,25 @@ type T struct {
}
// Fail marks the function as having failed but continues execution.
-func (c *common) Fail() { c.failed = true }
+func (c *common) Fail() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.failed = true
+}
-// Failed returns whether the function has failed.
-func (c *common) Failed() bool { return c.failed }
+// Failed reports whether the function has failed.
+func (c *common) Failed() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.failed
+}
// FailNow marks the function as having failed and stops its execution.
// Execution will continue at the next test or benchmark.
+// FailNow must be called from the goroutine running the
+// test or benchmark function, not from other goroutines
+// created during the test. Calling FailNow does not stop
+// those other goroutines.
func (c *common) FailNow() {
c.Fail()
@@ -204,43 +240,80 @@ func (c *common) FailNow() {
// log generates the output. It's always at the same stack depth.
func (c *common) log(s string) {
- c.output = append(c.output, decorate(s, true)...)
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.output = append(c.output, decorate(s)...)
}
-// Log formats its arguments using default formatting, analogous to Println(),
+// Log formats its arguments using default formatting, analogous to Println,
// and records the text in the error log.
func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) }
-// Logf formats its arguments according to the format, analogous to Printf(),
+// Logf formats its arguments according to the format, analogous to Printf,
// and records the text in the error log.
func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) }
-// Error is equivalent to Log() followed by Fail().
+// Error is equivalent to Log followed by Fail.
func (c *common) Error(args ...interface{}) {
c.log(fmt.Sprintln(args...))
c.Fail()
}
-// Errorf is equivalent to Logf() followed by Fail().
+// Errorf is equivalent to Logf followed by Fail.
func (c *common) Errorf(format string, args ...interface{}) {
c.log(fmt.Sprintf(format, args...))
c.Fail()
}
-// Fatal is equivalent to Log() followed by FailNow().
+// Fatal is equivalent to Log followed by FailNow.
func (c *common) Fatal(args ...interface{}) {
c.log(fmt.Sprintln(args...))
c.FailNow()
}
-// Fatalf is equivalent to Logf() followed by FailNow().
+// Fatalf is equivalent to Logf followed by FailNow.
func (c *common) Fatalf(format string, args ...interface{}) {
c.log(fmt.Sprintf(format, args...))
c.FailNow()
}
-// Parallel signals that this test is to be run in parallel with (and only with)
-// other parallel tests in this CPU group.
+// Skip is equivalent to Log followed by SkipNow.
+func (c *common) Skip(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.SkipNow()
+}
+
+// Skipf is equivalent to Logf followed by SkipNow.
+func (c *common) Skipf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.SkipNow()
+}
+
+// SkipNow marks the test as having been skipped and stops its execution.
+// Execution will continue at the next test or benchmark. See also FailNow.
+// SkipNow must be called from the goroutine running the test, not from
+// other goroutines created during the test. Calling SkipNow does not stop
+// those other goroutines.
+func (c *common) SkipNow() {
+ c.skip()
+ runtime.Goexit()
+}
+
+func (c *common) skip() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.skipped = true
+}
+
+// Skipped reports whether the test was skipped.
+func (c *common) Skipped() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.skipped
+}
+
+// Parallel signals that this test is to be run in parallel with (and only with)
+// other parallel tests.
func (t *T) Parallel() {
t.signal <- (*T)(nil) // Release main testing loop
<-t.startParallel // Wait for serial tests to finish
@@ -257,7 +330,7 @@ func tRunner(t *T, test *InternalTest) {
t.start = time.Now()
// When this goroutine is done, either because test.F(t)
- // returned normally or because a test failure triggered
+ // returned normally or because a test failure triggered
// a call to runtime.Goexit, record the duration and send
// a signal saying that the test is done.
defer func() {
@@ -297,10 +370,14 @@ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest,
func (t *T) report() {
tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds())
format := "--- %s: %s %s\n%s"
- if t.failed {
+ if t.Failed() {
fmt.Printf(format, "FAIL", t.name, tstr, t.output)
} else if *chatty {
- fmt.Printf(format, "PASS", t.name, tstr, t.output)
+ if t.Skipped() {
+ fmt.Printf(format, "SKIP", t.name, tstr, t.output)
+ } else {
+ fmt.Printf(format, "PASS", t.name, tstr, t.output)
+ }
}
}
@@ -356,7 +433,7 @@ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalT
continue
}
t.report()
- ok = ok && !out.failed
+ ok = ok && !out.Failed()
}
running := 0
@@ -369,7 +446,7 @@ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalT
}
t := (<-collector).(*T)
t.report()
- ok = ok && !t.failed
+ ok = ok && !t.Failed()
running--
}
}
@@ -394,7 +471,9 @@ func before() {
}
// Could save f so after can call f.Close; not worth the effort.
}
-
+ if *blockProfile != "" && *blockProfileRate >= 0 {
+ runtime.SetBlockProfileRate(*blockProfileRate)
+ }
}
// after runs after all testing.
@@ -413,6 +492,17 @@ func after() {
}
f.Close()
}
+ if *blockProfile != "" && *blockProfileRate >= 0 {
+ f, err := os.Create(*blockProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err = pprof.Lookup("block").WriteTo(f, 0); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *blockProfile, err)
+ }
+ f.Close()
+ }
}
var timer *time.Timer
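
A short sketch tying the new skip and verbosity hooks together (the package, test name, and body are hypothetical); the new -test.blockprofile and -test.blockprofilerate flags are handled entirely in before/after and need no changes in test code:

	package foo // hypothetical

	import "testing"

	func TestNetworkDependent(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping network-dependent test in short mode")
		}
		if testing.Verbose() {
			t.Log("running full network round-trip") // extra detail only under -test.v
		}
		// ... the test body proper ...
	}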