author	dvyukov <dvyukov@google.com>	2013-11-19 15:31:01 +0400
committer	dvyukov <dvyukov@google.com>	2013-11-19 15:31:01 +0400
commit	fe64cb876a0c76fc47b7176b12784a4e1c64eb24 (patch)
tree	5b7ebdf8ce61d4e3fa4ee5e59939d3260787dcd5 /test/bench
parent	b7946e74261232e5cf9280f87326217462f0ec2b (diff)
download	go-fe64cb876a0c76fc47b7176b12784a4e1c64eb24.tar.gz
Diffstat (limited to 'test/bench')
-rw-r--r--	test/bench/perf/bench1.go	10
-rw-r--r--	test/bench/perf/driver.go	58
2 files changed, 48 insertions(+), 20 deletions(-)
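
In outline, the change below turns the hard-wired Benchmark entry point into a small driver API: a benchmark passes a BenchFunc to PerfBenchmark, the driver runs it benchNum times, keeps the run with the lowest RunTime, and prints one GOPERF-METRIC line for the per-iteration runtime plus one line per custom metric. With the SleepBenchmark from bench1.go, the output would look roughly like this (values illustrative; each sleep iteration costs about a millisecond, i.e. 1e6 ns):

	GOPERF-METRIC:runtime=1000000
	GOPERF-METRIC:foo=42
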
diff --git a/test/bench/perf/bench1.go b/test/bench/perf/bench1.go
index 2f776641e..85c215823 100644
--- a/test/bench/perf/bench1.go
+++ b/test/bench/perf/bench1.go
@@ -4,8 +4,12 @@ import (
"time"
)
-func Benchmark(N int64) error {
- // 13+
+func main() {
+ PerfBenchmark(SleepBenchmark)
+}
+
+func SleepBenchmark(N int64) (metrics []PerfMetric, err error) {
time.Sleep(time.Duration(N) * time.Millisecond)
- return nil
+ metrics = append(metrics, PerfMetric{"foo", 42})
+ return
}
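
bench1.go now doubles as an example of the new contract: a benchmark is any func(N int64) ([]PerfMetric, error) that performs N iterations of its workload and optionally reports extra metrics. As a second, hypothetical example (AllocBenchmark and its "allocs" metric are illustrative and not part of this commit; it would replace bench1.go and builds only together with driver.go in the same package):

	package main

	// AllocBenchmark is a hypothetical BenchFunc: it runs a small
	// allocation workload N times and reports one custom metric,
	// which the driver prints as GOPERF-METRIC:allocs=...
	func AllocBenchmark(N int64) (metrics []PerfMetric, err error) {
		for i := int64(0); i < N; i++ {
			_ = make([]byte, 64) // workload under measurement
		}
		metrics = append(metrics, PerfMetric{"allocs", N})
		return metrics, nil
	}

	func main() {
		PerfBenchmark(AllocBenchmark)
	}
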
diff --git a/test/bench/perf/driver.go b/test/bench/perf/driver.go
index 00cd66199..0cdd1455a 100644
--- a/test/bench/perf/driver.go
+++ b/test/bench/perf/driver.go
@@ -3,8 +3,9 @@ package main
import (
"flag"
"fmt"
- "os"
+ "log"
"time"
+ "runtime"
)
var (
@@ -13,45 +14,68 @@ var (
benchMem = flag.Int("benchmem", 64, "approx RSS value to aim at in benchmarks, in MB")
)
-type Result struct {
+type PerfResult struct {
N int64
RunTime time.Duration
+ Metrics []PerfMetric
}
-func main() {
- flag.Parse()
- var res Result
+type PerfMetric struct {
+ Type string
+ Val int64
+}
+
+type BenchFunc func(N int64) ([]PerfMetric, error)
+
+func PerfBenchmark(f BenchFunc) {
+ if !flag.Parsed() {
+ flag.Parse()
+ }
+ var res PerfResult
for i := 0; i < *benchNum; i++ {
- res1 := RunBenchmark()
+ res1 := RunBenchmark(f)
if res.RunTime == 0 || res.RunTime > res1.RunTime {
res = res1
}
}
fmt.Printf("GOPERF-METRIC:runtime=%v\n", int64(res.RunTime)/res.N)
+ for _, m := range res.Metrics {
+ fmt.Printf("GOPERF-METRIC:%v=%v\n", m.Type, m.Val)
+ }
}
-func RunBenchmark() Result {
- var res Result
+func RunBenchmark(f BenchFunc) PerfResult {
+ var res PerfResult
for ChooseN(&res) {
- res = RunOnce(res.N)
+ log.Printf("Benchmarking %v iterations\n", res.N)
+ res = RunOnce(f, res.N)
+ log.Printf("Done: %+v\n", res)
}
return res
}
-func RunOnce(N int64) Result {
- fmt.Printf("Benchmarking %v iterations\n", N)
+func RunOnce(f BenchFunc, N int64) PerfResult {
+ runtime.GC()
+ mstats0 := new(runtime.MemStats)
+ runtime.ReadMemStats(mstats0)
+ res := PerfResult{N: N}
+
t0 := time.Now()
- err := Benchmark(N)
+ var err error
+ res.Metrics, err = f(N)
+ res.RunTime = time.Since(t0)
+
if err != nil {
- fmt.Printf("Benchmark function failed: %v\n", err)
- os.Exit(1)
+ log.Fatalf("Benchmark function failed: %v\n", err)
}
- res := Result{N: N}
- res.RunTime = time.Since(t0)
+
+ mstats1 := new(runtime.MemStats)
+ runtime.ReadMemStats(mstats1)
+ fmt.Printf("%+v\n", *mstats1)
return res
}
-func ChooseN(res *Result) bool {
+func ChooseN(res *PerfResult) bool {
const MaxN = 1e12
last := res.N
if last == 0 {
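
ChooseN's body continues beyond this excerpt. As a rough sketch only (not the commit's actual code), a chooser with this signature typically starts at N = 1 to calibrate and then grows N until the measured run time reaches a target duration; the benchTime flag below is an assumption for illustration:

	// Illustrative sketch of an iteration chooser, not the commit's ChooseN.
	// benchTime is a hypothetical -benchtime style target duration.
	var benchTime = flag.Duration("benchtime", 5*time.Second, "target benchmark run time")

	func ChooseN(res *PerfResult) bool {
		const MaxN = 1e12 // hard cap on the iteration count
		last := res.N
		if last == 0 {
			res.N = 1 // first, cheap calibration run
			return true
		}
		if res.RunTime >= *benchTime || last >= MaxN {
			return false // ran long enough, or hit the cap
		}
		res.N = last * 2 // not there yet: double and measure again
		return true
	}
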