author     Ondřej Surý <ondrej@sury.org>    2012-04-06 15:14:11 +0200
committer  Ondřej Surý <ondrej@sury.org>    2012-04-06 15:14:11 +0200
commit     505c19580e0f43fe5224431459cacb7c21edd93d
tree       79e2634c253d60afc0cc0b2f510dc7dcbb48497b /src/pkg/testing/benchmark.go
parent     1336a7c91e596c423a49d1194ea42d98bca0d958
download   golang-505c19580e0f43fe5224431459cacb7c21edd93d.tar.gz
Imported Upstream version 1 (tag: upstream/1)
Diffstat (limited to 'src/pkg/testing/benchmark.go')
-rw-r--r--  src/pkg/testing/benchmark.go  127
1 file changed, 92 insertions, 35 deletions
diff --git a/src/pkg/testing/benchmark.go b/src/pkg/testing/benchmark.go
index fd0bd8665..41290594e 100644
--- a/src/pkg/testing/benchmark.go
+++ b/src/pkg/testing/benchmark.go
@@ -16,7 +16,7 @@ var matchBenchmarks = flag.String("test.bench", "", "regular expression to selec
 var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")

 // An internal type but exported because it is cross-package; part of the implementation
-// of gotest.
+// of the "go test" command.
 type InternalBenchmark struct {
     Name string
     F    func(b *B)
@@ -25,19 +25,21 @@ type InternalBenchmark struct {
 // B is a type passed to Benchmark functions to manage benchmark
 // timing and to specify the number of iterations to run.
 type B struct {
+    common
     N         int
     benchmark InternalBenchmark
-    ns        int64
     bytes     int64
-    start     int64
+    timerOn   bool
+    result    BenchmarkResult
 }

 // StartTimer starts timing a test. This function is called automatically
 // before a benchmark starts, but it can also used to resume timing after
 // a call to StopTimer.
 func (b *B) StartTimer() {
-    if b.start == 0 {
-        b.start = time.Nanoseconds()
+    if !b.timerOn {
+        b.start = time.Now()
+        b.timerOn = true
     }
 }

@@ -45,19 +47,19 @@ func (b *B) StartTimer() {
 // while performing complex initialization that you don't
 // want to measure.
 func (b *B) StopTimer() {
-    if b.start > 0 {
-        b.ns += time.Nanoseconds() - b.start
+    if b.timerOn {
+        b.duration += time.Now().Sub(b.start)
+        b.timerOn = false
     }
-    b.start = 0
 }

 // ResetTimer sets the elapsed benchmark time to zero.
 // It does not affect whether the timer is running.
 func (b *B) ResetTimer() {
-    if b.start > 0 {
-        b.start = time.Nanoseconds()
+    if b.timerOn {
+        b.start = time.Now()
     }
-    b.ns = 0
+    b.duration = 0
 }

 // SetBytes records the number of bytes processed in a single operation.
@@ -68,7 +70,7 @@ func (b *B) nsPerOp() int64 {
     if b.N <= 0 {
         return 0
     }
-    return b.ns / int64(b.N)
+    return b.duration.Nanoseconds() / int64(b.N)
 }

 // runN runs a single benchmark for the specified number of iterations.
@@ -125,23 +127,38 @@ func roundUp(n int) int {
     return 10 * base
 }

-// run times the benchmark function. It gradually increases the number
+// run times the benchmark function in a separate goroutine.
+func (b *B) run() BenchmarkResult {
+    go b.launch()
+    <-b.signal
+    return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
 // of benchmark iterations until the benchmark runs for a second in order
 // to get a reasonable measurement. It prints timing information in this form
 //    testing.BenchmarkHello    100000    19 ns/op
-func (b *B) run() BenchmarkResult {
+// launch is run by the fun function as a separate goroutine.
+func (b *B) launch() {
     // Run the benchmark for a single iteration in case it's expensive.
     n := 1
+
+    // Signal that we're done whether we return normally
+    // or by FailNow's runtime.Goexit.
+    defer func() {
+        b.signal <- b
+    }()
+
     b.runN(n)
     // Run the benchmark for at least the specified amount of time.
-    time := int64(*benchTime * 1e9)
-    for b.ns < time && n < 1e9 {
+    d := time.Duration(*benchTime * float64(time.Second))
+    for !b.failed && b.duration < d && n < 1e9 {
         last := n
         // Predict iterations/sec.
         if b.nsPerOp() == 0 {
             n = 1e9
         } else {
-            n = int(time / b.nsPerOp())
+            n = int(d.Nanoseconds() / b.nsPerOp())
         }
         // Run more iterations than we think we'll need for a second (1.5x).
         // Don't grow too fast in case we had timing errors previously.
@@ -151,28 +168,28 @@ func (b *B) run() BenchmarkResult {
         n = roundUp(n)
         b.runN(n)
     }
-    return BenchmarkResult{b.N, b.ns, b.bytes}
+    b.result = BenchmarkResult{b.N, b.duration, b.bytes}
 }

 // The results of a benchmark run.
 type BenchmarkResult struct {
-    N     int   // The number of iterations.
-    Ns    int64 // The total time taken.
-    Bytes int64 // Bytes processed in one iteration.
+    N     int           // The number of iterations.
+    T     time.Duration // The total time taken.
+    Bytes int64         // Bytes processed in one iteration.
 }

 func (r BenchmarkResult) NsPerOp() int64 {
     if r.N <= 0 {
         return 0
     }
-    return r.Ns / int64(r.N)
+    return r.T.Nanoseconds() / int64(r.N)
 }

 func (r BenchmarkResult) mbPerSec() float64 {
-    if r.Bytes <= 0 || r.Ns <= 0 || r.N <= 0 {
+    if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
         return 0
     }
-    return float64(r.Bytes) * float64(r.N) / float64(r.Ns) * 1e3
+    return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
 }

 func (r BenchmarkResult) String() string {
@@ -187,17 +204,17 @@ func (r BenchmarkResult) String() string {
         // The format specifiers here make sure that
         // the ones digits line up for all three possible formats.
         if nsop < 10 {
-            ns = fmt.Sprintf("%13.2f ns/op", float64(r.Ns)/float64(r.N))
+            ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
         } else {
-            ns = fmt.Sprintf("%12.1f ns/op", float64(r.Ns)/float64(r.N))
+            ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
         }
     }
     return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
 }

 // An internal function but exported because it is cross-package; part of the implementation
-// of gotest.
-func RunBenchmarks(matchString func(pat, str string) (bool, os.Error), benchmarks []InternalBenchmark) {
+// of the "go test" command.
+func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
     // If no flag was specified, don't run benchmarks.
     if len(*matchBenchmarks) == 0 {
         return
@@ -205,7 +222,7 @@ func RunBenchmarks(matchString func(pat, str string) (bool, os.Error), benchmark
     for _, Benchmark := range benchmarks {
         matched, err := matchString(*matchBenchmarks, Benchmark.Name)
         if err != nil {
-            println("invalid regexp for -test.bench:", err.String())
+            fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
             os.Exit(1)
         }
         if !matched {
@@ -213,24 +230,64 @@ func RunBenchmarks(matchString func(pat, str string) (bool, os.Error), benchmark
         }
         for _, procs := range cpuList {
             runtime.GOMAXPROCS(procs)
-            b := &B{benchmark: Benchmark}
+            b := &B{
+                common: common{
+                    signal: make(chan interface{}),
+                },
+                benchmark: Benchmark,
+            }
             benchName := Benchmark.Name
             if procs != 1 {
                 benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
             }
-            print(fmt.Sprintf("%s\t", benchName))
+            fmt.Printf("%s\t", benchName)
             r := b.run()
-            print(fmt.Sprintf("%v\n", r))
+            if b.failed {
+                // The output could be very long here, but probably isn't.
+                // We print it all, regardless, because we don't want to trim the reason
+                // the benchmark failed.
+                fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
+                continue
+            }
+            fmt.Printf("%v\n", r)
+            // Unlike with tests, we ignore the -chatty flag and always print output for
+            // benchmarks since the output generation time will skew the results.
+            if len(b.output) > 0 {
+                b.trimOutput()
+                fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
+            }
             if p := runtime.GOMAXPROCS(-1); p != procs {
-                print(fmt.Sprintf("%s left GOMAXPROCS set to %d\n", benchName, p))
+                fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+            }
+        }
+    }
+}
+
+// trimOutput shortens the output from a benchmark, which can be very long.
+func (b *B) trimOutput() {
+    // The output is likely to appear multiple times because the benchmark
+    // is run multiple times, but at least it will be seen. This is not a big deal
+    // because benchmarks rarely print, but just in case, we trim it if it's too long.
+    const maxNewlines = 10
+    for nlCount, j := 0, 0; j < len(b.output); j++ {
+        if b.output[j] == '\n' {
+            nlCount++
+            if nlCount >= maxNewlines {
+                b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
+                break
             }
         }
     }
 }

 // Benchmark benchmarks a single function. Useful for creating
-// custom benchmarks that do not use gotest.
+// custom benchmarks that do not use the "go test" command.
 func Benchmark(f func(b *B)) BenchmarkResult {
-    b := &B{benchmark: InternalBenchmark{"", f}}
+    b := &B{
+        common: common{
+            signal: make(chan interface{}),
+        },
+        benchmark: InternalBenchmark{"", f},
+    }
     return b.run()
 }
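
For context, this is how user code interacts with the timer API changed above: StopTimer and StartTimer now flip the timerOn flag and accumulate a time.Duration instead of subtracting raw nanosecond counts. A minimal sketch, not part of the commit; the benchmark name BenchmarkCopy and the 1 MB buffer size are invented for illustration.

package copy_test

import "testing"

// BenchmarkCopy (hypothetical) excludes setup from the measured time.
// StopTimer pauses accumulation into b.duration; StartTimer resumes it.
func BenchmarkCopy(b *testing.B) {
    b.StopTimer()               // pause the timer while we allocate buffers
    src := make([]byte, 1<<20)  // 1 MB of source data (arbitrary size)
    dst := make([]byte, 1<<20)
    b.SetBytes(int64(len(src))) // enables the MB/s column (via mbPerSec)
    b.StartTimer()              // resume; only the copies below are timed
    for i := 0; i < b.N; i++ {
        copy(dst, src)
    }
}

Because launch now checks b.failed on each growth step, a benchmark that calls b.FailNow stops iterating instead of running out its full duration.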
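The Benchmark helper at the end of the diff can also be driven outside the "go test" command. A sketch assuming the post-change API, where BenchmarkResult.T is a time.Duration (earlier code would have read the removed Ns field); the sleep interval is arbitrary.

package main

import (
    "fmt"
    "testing"
    "time"
)

func main() {
    // testing.Benchmark grows b.N until the run lasts roughly the
    // -test.benchtime default of one second, then reports the result.
    r := testing.Benchmark(func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            time.Sleep(100 * time.Microsecond)
        }
    })
    // r.T replaces the old int64 Ns field with a time.Duration.
    fmt.Printf("%d iterations in %v (%d ns/op)\n", r.N, r.T, r.NsPerOp())
}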