package main

import (
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/cockroachdb/errors/oserror"
	"github.com/spf13/cobra"
)

// A note to the reader on the nomenclature used in this command.
//
// The write-throughput benchmark is generated by a roachtest with a number of
// independent worker VMs running the same benchmark (to allow for an average
// value to be recorded).
//
// An instance of the roachtest on a given day, for a given workload type (e.g.
// values of size 1024B, values of size 64B, etc.) is modelled as a `writeRun`.
// Each worker VM in a `writeRun` produces data modelled as a `rawWriteRun`.
// Each `rawWriteRun` contains the raw data points emitted periodically by the
// VM; these are modelled as `writePoint`s.
//
// A `writeWorkload` (i.e. singular) models all data for a particular type of
// benchmark run (e.g. values of size 1024B), across all days. It is a mapping
// of day to `writeRun`, which is a collection of `rawWriteRun`s.
//
// The `writeWorkloads` (i.e. plural) is a mapping from workload name to its
// `writeWorkload`.
//
// The data can be thought of as being modelled as follows:
//
//                                     `writeWorkloads`---------\
// - workload-name-A:                  `writeWorkload`-------\  |
//   - day-1:                          `writeRun`---------\  |  |
//      - VM-1:                        `rawWriteRun`----\ |  |  |
//        [ ... raw data point ... ]   `writePoint`     x |  |  |
//        ...                                             |  |  |
//      - VM-N:                                           |  |  |
//        [ ... raw data point ... ]                      x  |  |
//     ...                                                   |  |
//   - day-N:                                                |  |
//      - VM-1:                                              |  |
//        [ ... raw data point ... ]                         |  |
//        ...                                                |  |
//      - VM-N:                                              |  |
//        [ ... raw data point ... ]                         x  |
//   ...                                                        |
// - workload-name-Z:                                           |
//   - day-1:                                                   |
//      - VM-1:                                                 |
//        [ ... raw data point ... ]                            |
//        ...                                                   |
//      - VM-N:                                                 |
//        [ ... raw data point ... ]                            |
//     ...                                                      |
//   - day-N:                                                   |
//      - VM-1:                                                 |
//        [ ... raw data point ... ]                            |
//        ...                                                   |
//      - VM-N:                                                 |
//        [ ... raw data point ... ]                            x

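// The hierarchy above, rendered as (hypothetical) Go values. The workload
// name, day, and file path below are fabricated purely for illustration of
// the nesting; they are not part of the tool itself:
var _ = writeWorkloads{
	"values=1024": &writeWorkload{
		days: map[string]*writeRun{
			"20231005": {
				name: "values=1024",
				date: "20231005",
				dir:  "20231005/pebble/write/values=1024/1",
				rawRuns: map[string]rawWriteRun{
					"20231005/pebble/write/values=1024/1/run.log.gz": {
						points: []writePoint{
							{elapsedSecs: 10, opsSec: 2048, passed: true, writeAmp: 1.25},
						},
					},
				},
			},
		},
	},
}
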
const (
	// summaryFilename is the filename for the top-level summary output.
	summaryFilename = "summary.json"

	// rawRunFmt is the format string for raw benchmark data.
	rawRunFmt = "BenchmarkRaw%s %d ops/sec %v pass %s elapsed %d bytes %d levels %f writeAmp"
)
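
// parseRawLineExample is an illustrative sketch (not part of the original
// tool) showing how a single fabricated benchmark line maps onto rawRunFmt:
// the workload name, ops/sec, pass/fail, elapsed duration, size in bytes,
// level count, and write-amplification are scanned in that order.
func parseRawLineExample() (string, writePoint, error) {
	line := "BenchmarkRawvalues=1024 2048 ops/sec true pass 10s elapsed 4096 bytes 6 levels 1.25 writeAmp"
	var p writePoint
	var name, elapsed string
	if _, err := fmt.Sscanf(line, rawRunFmt,
		&name, &p.opsSec, &p.passed, &elapsed, &p.size, &p.levels, &p.writeAmp); err != nil {
		return "", writePoint{}, err
	}
	d, err := time.ParseDuration(elapsed) // e.g. "10s"
	if err != nil {
		return "", writePoint{}, err
	}
	p.elapsedSecs = int(d.Seconds())
	return name, p, nil // name = "values=1024"
}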

func getWriteCommand() *cobra.Command {
	c := &cobra.Command{
		Use:   "write",
		Short: "parse write throughput benchmark data",
		Long: `
Parses write-throughput benchmark data into two sets of JSON "summary" files:

1. A top-level summary.json file. Data in this file is reported per day, per
workload (e.g. values=1024), and is responsible for the top-level
write-throughput visualizations on the Pebble benchmarks page.

Each data point in a time-series contains an ops/sec figure (measured as a
simple average over all data points for that workload run), and a relative path
to a per-run summary JSON file containing the raw data for the run.

2. A per-run *-summary.json file. Data in this file contains the raw data for
each of the benchmark instances participating in the workload run on the given
day. Each key in the file is the relative path to the original raw data file.
Each data point contains the calculated optimal ops/sec for the instance of the
run (see split.go for more detail on the algorithm), in addition to the raw data
in CSV format.

This command can be run without flags at the root of the directory containing
the raw data. By default, the raw data is read from "data" and the resulting
top-level and per-run summary files are written to "write-throughput". Both
locations can be overridden with the --data-dir and --summary-dir flags,
respectively.
`,
		RunE: func(cmd *cobra.Command, args []string) error {
			dataDir, err := cmd.Flags().GetString("data-dir")
			if err != nil {
				return err
			}

			summaryDir, err := cmd.Flags().GetString("summary-dir")
			if err != nil {
				return err
			}

			return parseWrite(dataDir, summaryDir)
		},
	}

	c.Flags().String("data-dir", "data", "path to the raw data directory")
	c.Flags().String("summary-dir", "write-throughput", "output directory containing the summary files")
	c.SilenceUsage = true

	return c
}
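
// A hypothetical invocation, assuming the tool is built as an `mkbench`
// binary and run from the directory containing the raw data:
//
//	mkbench write                                    # data -> write-throughput
//	mkbench write --data-dir=raw --summary-dir=out   # overridden locations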

// writePoint is a raw datapoint from an individual write-throughput benchmark
// run.
type writePoint struct {
	elapsedSecs int
	opsSec      int
	passed      bool
	size        uint64
	levels      int
	writeAmp    float64
}

// formatCSV returns a comma-separated string representation of the datapoint.
func (p writePoint) formatCSV() string {
	return fmt.Sprintf(
		"%d,%d,%v,%d,%d,%.2f",
		p.elapsedSecs, p.opsSec, p.passed, p.size, p.levels, p.writeAmp)
}

// rawWriteRun is a collection of datapoints from a single instance of a
// benchmark run (i.e. the datapoints comprising a single roachtest instance of
// a write-throughput benchmark).
type rawWriteRun struct {
	points []writePoint
	split  int // memoized
}

// opsPerSecSplit returns the optimal split point that divides the passing and
// failing datapoints in a rawWriteRun.
func (r *rawWriteRun) opsPerSecSplit() int {
	if r.split > 0 {
		return r.split
	}

	// Pre-process by partitioning the datapoints into passes and fails.
	var passes, fails []int
	for _, p := range r.points {
		if p.passed {
			passes = append(passes, p.opsSec)
		} else {
			fails = append(fails, p.opsSec)
		}
	}

	// Compute and cache the split point, as we only need to calculate it once.
	split := findOptimalSplit(passes, fails)
	r.split = split

	return split
}
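
// A minimal usage sketch with fabricated figures; the returned split is
// whatever findOptimalSplit (see split.go) computes for the two partitions:
//
//	r := rawWriteRun{points: []writePoint{
//		{opsSec: 1200, passed: true},
//		{opsSec: 900, passed: false},
//	}}
//	split := r.opsPerSecSplit() // computed once, then memoized in r.split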

// writeAmp returns the value of the write-amplification at the end of the run.
func (r *rawWriteRun) writeAmp() float64 {
	return r.points[len(r.points)-1].writeAmp
}

// formatCSV returns a comma-separated string representation of the rawWriteRun.
// The value is a newline-delimited string composed of the CSV representations
// of the individual writePoints.
func (r rawWriteRun) formatCSV() string {
	var b bytes.Buffer
	for _, p := range r.points {
		_, _ = fmt.Fprintf(&b, "%s\n", p.formatCSV())
	}
	return b.String()
}

// writeRunSummary represents a single summary datapoint across all rawWriteRuns
// that comprise a writeRun. The datapoint contains a summary ops-per-second
// value, in addition to a path to the summary.json file with the combined data
// for the run.
type writeRunSummary struct {
	Name        string  `json:"name"`
	Date        string  `json:"date"`
	OpsSec      int     `json:"opsSec"`
	WriteAmp    float64 `json:"writeAmp"`
	SummaryPath string  `json:"summaryPath"`
}

// writeWorkloadSummary is an alias for a slice of writeRunSummaries.
type writeWorkloadSummary []writeRunSummary

// writeRun is a collection of one or more rawWriteRuns (i.e. the union of all
// rawWriteRuns from each worker participating in the roachtest cluster used for
// running the write-throughput benchmarks).
type writeRun struct {
	// name is the benchmark workload name (e.g. "values=1024").
	name string

	// date is the date on which the writeRun took place.
	date string

	// dir is the path to the directory containing the raw data. The path is
	// relative to the data-dir.
	dir string

	// rawRuns is a map from input data filename to its rawWriteRun data.
	rawRuns map[string]rawWriteRun
}

// summaryFilename returns the filename to be used for storing the summary
// output for the writeRun. The filename preserves the original data source path
// for ease of debugging / data-provenance.
func (r writeRun) summaryFilename() string {
	parts := strings.Split(r.dir, string(os.PathSeparator))
	parts = append(parts, summaryFilename)
	return strings.Join(parts, "-")
}
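
// For example, a writeRun with the (hypothetical) dir
// "20231005/pebble/write/values=1024/1" yields the summary filename
// "20231005-pebble-write-values=1024-1-summary.json".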

// summarize computes a writeRunSummary datapoint for the writeRun.
func (r writeRun) summarize() writeRunSummary {
	var (
		sumOpsSec   int
		sumWriteAmp float64
	)
	for _, rr := range r.rawRuns {
		sumOpsSec += rr.opsPerSecSplit()
		sumWriteAmp += rr.writeAmp()
	}
	l := len(r.rawRuns)

	return writeRunSummary{
		Name:        r.name,
		Date:        r.date,
		SummaryPath: r.summaryFilename(),
		// Calculate an average across all raw runs in this run.
		// TODO(travers): test how this works in practice, after we have
		// gathered enough data.
		OpsSec:   sumOpsSec / l,
		WriteAmp: math.Round(100*sumWriteAmp/float64(l)) / 100, // round to 2dp.
	}
}
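
// A worked (hypothetical) example of the averaging above: two raw runs with
// splits of 1000 and 1200 ops/sec and final write-amps of 1.5 and 2.5 yield
// OpsSec = (1000+1200)/2 = 1100 and WriteAmp = math.Round(100*4.0/2)/100 = 2.0.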

// cookedWriteRun is a representation of a previously parsed (or "cooked")
// writeRun.
type cookedWriteRun struct {
	OpsSec int    `json:"opsSec"`
	Raw    string `json:"rawData"`
}

// formatSummaryJSON returns a JSON representation of the combined raw data from
// all rawWriteRuns that comprise the writeRun. It has the form:
//
//	{
//	  "original-raw-write-run-log-file-1.gz": {
//	    "opsSec": ...,
//	    "rawData": ...,
//	  },
//	  ...
//	  "original-raw-write-run-log-file-N.gz": {
//	    "opsSec": ...,
//	    "rawData": ...,
//	  },
//	}
func (r writeRun) formatSummaryJSON() ([]byte, error) {
	m := make(map[string]cookedWriteRun)
	for name, data := range r.rawRuns {
		m[name] = cookedWriteRun{
			OpsSec: data.opsPerSecSplit(),
			Raw:    data.formatCSV(),
		}
	}
	return prettyJSON(&m), nil
}

// writeWorkload is a map from "day" to the corresponding writeRun, for a given
// write-throughput benchmark workload (e.g. values=1024).
type writeWorkload struct {
	days map[string]*writeRun // map from day to runs for the given workload
}

// writeWorkloads is a map from workload name to its corresponding
// writeWorkload.
type writeWorkloads map[string]*writeWorkload

// nameDay is a (name, day) tuple, used as a map key.
type nameDay struct {
	name, day string
}

type writeLoader struct {
	// dataDir is the path to the root directory containing the raw data.
	dataDir string

	// summaryDir is the path to the directory to which the summary files are
	// written.
	summaryDir string

	// workloads is a map from workload name to its corresponding data.
	workloads writeWorkloads

	// cooked is a "set" of (workload, day) tuples recording whether
	// previously parsed data was present for the (workload, day).
	cooked map[nameDay]bool

	// cookedSummaries is a map from workload name to previously generated data
	// for the workload. This data is "mixed-in" with new data when the summary
	// files are written out.
	cookedSummaries map[string]writeWorkloadSummary
}

// newWriteLoader returns a new writeLoader that can be used to generate the
// summary files for write-throughput benchmarking data.
func newWriteLoader(dataDir, summaryDir string) *writeLoader {
	return &writeLoader{
		dataDir:         dataDir,
		summaryDir:      summaryDir,
		workloads:       make(writeWorkloads),
		cooked:          make(map[nameDay]bool),
		cookedSummaries: make(map[string]writeWorkloadSummary),
	}
}

// loadCooked loads previously summarized write-throughput benchmark data.
func (l *writeLoader) loadCooked() error {
	b, err := os.ReadFile(filepath.Join(l.summaryDir, summaryFilename))
	if err != nil {
		// The first ever run will not find the summary file. Return early in
		// this case, and we'll start afresh.
		if oserror.IsNotExist(err) {
			return nil
		}
		return err
	}

	// Reconstruct the summary.
	summaries := make(map[string]writeWorkloadSummary)
	err = json.Unmarshal(b, &summaries)
	if err != nil {
		return err
	}

	// Populate the cooked map.
	l.cookedSummaries = summaries

	// Populate the set used for determining whether we can skip a raw file.
	for name, workloadSummary := range summaries {
		for _, runSummary := range workloadSummary {
			l.cooked[nameDay{name, runSummary.Date}] = true
		}
	}

	return nil
}

// loadRaw loads the raw data from the root data directory.
func (l *writeLoader) loadRaw() error {
	walkFn := func(path, pathRel string, info os.FileInfo) error {
		// The relative directory structure is of the form:
		//   $day/pebble/write/$name/$run/$file
		parts := strings.Split(pathRel, string(os.PathSeparator))
		if len(parts) < 6 {
			return nil // stumble forward on invalid paths
		}

		// Filter out files that aren't in write benchmark directories.
		if parts[2] != "write" {
			return nil
		}
		day := parts[0]

		f, err := os.Open(path)
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "%+v\n", err)
			return nil // stumble forward on error
		}
		defer func() { _ = f.Close() }()

		rd := io.Reader(f)
		if strings.HasSuffix(path, ".bz2") {
			rd = bzip2.NewReader(f)
		} else if strings.HasSuffix(path, ".gz") {
			var err error
			rd, err = gzip.NewReader(f)
			if err != nil {
				_, _ = fmt.Fprintf(os.Stderr, "%+v\n", err)
				return nil // stumble forward on error
			}
		}

		// Parse the data for this file and add to the appropriate workload.
		s := bufio.NewScanner(rd)
		r := rawWriteRun{}
		var name string
		for s.Scan() {
			line := s.Text()
			if !strings.HasPrefix(line, "BenchmarkRaw") {
				continue
			}

			var p writePoint
			var nameInner, elapsed string
			n, err := fmt.Sscanf(line, rawRunFmt,
				&nameInner, &p.opsSec, &p.passed, &elapsed, &p.size, &p.levels, &p.writeAmp)
			if err != nil || n != 7 {
				// Stumble forward on error.
				_, _ = fmt.Fprintf(os.Stderr, "%s: %v\n", s.Text(), err)
				continue
			}

			// The benchmark name parsed from the first datapoint in the file
			// is assumed to apply to all datapoints that follow in that file.
			if name == "" {
				name = nameInner

				// Skip files for (workload, day) pairs that have been parsed
				// previously. Note that this relies on loadCooked having been
				// called previously to seed the map with cooked data.
				if ok := l.cooked[nameDay{name, day}]; ok {
					_, _ = fmt.Fprintf(os.Stderr,
						"skipping previously cooked data in file %s (workload=%q, day=%q)\n",
						pathRel, name, day)
					return nil
				}
			} else if name != nameInner {
				_, _ = fmt.Fprintf(os.Stderr,
					"WARN: benchmark name %q differs from previously seen name %q: %s\n",
					nameInner, name, s.Text())
			}

			// Convert the elapsed time into seconds.
			secs, err := time.ParseDuration(elapsed)
			if err != nil {
				// Stumble forward on error.
				_, _ = fmt.Fprintf(os.Stderr, "%s: %v\n", s.Text(), err)
				continue
			}
			p.elapsedSecs = int(secs.Seconds())

			// Add this data point to the collection of points for this run.
			r.points = append(r.points, p)
		}

		// Add the raw run to the map.
		l.addRawRun(name, day, pathRel, r)

		return nil
	}
	return walkDir(l.dataDir, walkFn)
}
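
// As a concrete (hypothetical) example of the layout walked above, a file at
// the relative path
//
//	20231005/pebble/write/values=1024/1/run.log.gz
//
// has parts[0] = "20231005" (the day) and parts[2] = "write", so it is
// decompressed, parsed, and recorded under that (workload, day).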

// addRawRun adds a rawWriteRun to the appropriate datastructures: it looks up
// the workload by name (e.g. "values=1024") and day, creating entries as
// needed, and records the rawWriteRun in the writeRun's rawRuns map, keyed by
// the path of the raw data file.
func (l *writeLoader) addRawRun(name, day, path string, raw rawWriteRun) {
	// Skip files with no points (i.e. files that couldn't be parsed).
	if len(raw.points) == 0 {
		return
	}

	_, _ = fmt.Fprintf(
		os.Stderr, "adding raw run: (workload=%q, day=%q); nPoints=%d; file=%s\n",
		name, day, len(raw.points), path)

	w := l.workloads[name]
	if w == nil {
		w = &writeWorkload{days: make(map[string]*writeRun)}
		l.workloads[name] = w
	}

	r := w.days[day]
	if r == nil {
		r = &writeRun{
			name:    name,
			date:    day,
			dir:     filepath.Dir(path),
			rawRuns: make(map[string]rawWriteRun),
		}
		w.days[day] = r
	}
	r.rawRuns[path] = raw
}

// cookSummary writes out the data in the loader to the summary file (new or
// existing).
func (l *writeLoader) cookSummary() error {
	summary := make(map[string]writeWorkloadSummary)
	for name, w := range l.workloads {
		summary[name] = cookWriteSummary(w)
	}

	// Mix in the previously cooked values.
	for name, cooked := range l.cookedSummaries {
		existing, ok := summary[name]
		if !ok {
			summary[name] = cooked
		} else {
			// We must merge and re-sort by date.
			existing = append(existing, cooked...)
			sort.Slice(existing, func(i, j int) bool {
				return existing[i].Date < existing[j].Date
			})
			summary[name] = existing
		}
	}
	b := prettyJSON(&summary)
	b = append(b, '\n')

	outputPath := filepath.Join(l.summaryDir, summaryFilename)
	err := os.WriteFile(outputPath, b, 0644)
	if err != nil {
		return err
	}

	return nil
}

// cookWriteSummary is a helper that generates the summary for a write workload
// by computing the per-day summaries across all runs.
func cookWriteSummary(w *writeWorkload) writeWorkloadSummary {
	days := make([]string, 0, len(w.days))
	for day := range w.days {
		days = append(days, day)
	}
	sort.Strings(days)

	var summary writeWorkloadSummary
	for _, day := range days {
		r := w.days[day]
		summary = append(summary, r.summarize())
	}

	return summary
}

// cookWriteRunSummaries writes out the per-run summary files.
func (l *writeLoader) cookWriteRunSummaries() error {
	for _, w := range l.workloads {
		for _, r := range w.days {
			// Write out files preserving the original directory structure for
			// ease of understanding / debugging.
			outputPath := filepath.Join(l.summaryDir, r.summaryFilename())
			if err := outputWriteRunSummary(r, outputPath); err != nil {
				return err
			}
		}
	}
	return nil
}

// outputWriteRunSummary is a helper that generates the summary JSON for the
// writeRun and writes it to the given output path.
func outputWriteRunSummary(r *writeRun, outputPath string) error {
	f, err := os.OpenFile(outputPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()

	b, err := r.formatSummaryJSON()
	if err != nil {
		return err
	}
	b = append(b, '\n')

	_, err = f.Write(b)
	return err
}

// parseWrite parses the raw write-throughput benchmark data and writes out the
// summary files.
func parseWrite(dataDir, summaryDir string) error {
	l := newWriteLoader(dataDir, summaryDir)
	if err := l.loadCooked(); err != nil {
		return err
	}

	if err := l.loadRaw(); err != nil {
		return err
	}

	if err := l.cookSummary(); err != nil {
		return err
	}

	return l.cookWriteRunSummaries()
}
