From 5b490fde33654044f436db9abfd7e342dc2ecf62 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 8 Nov 2024 18:23:28 +0100 Subject: [PATCH 01/67] Init kvalobs importer --- migrations/kvalobs/dump.go | 513 +++++++++++++++++++++++++++++++++++ migrations/kvalobs/import.go | 106 ++++++++ migrations/kvalobs/main.go | 172 ++++++++++++ migrations/kvalobs/utils.go | 77 ++++++ 4 files changed, 868 insertions(+) create mode 100644 migrations/kvalobs/dump.go create mode 100644 migrations/kvalobs/import.go create mode 100644 migrations/kvalobs/main.go create mode 100644 migrations/kvalobs/utils.go diff --git a/migrations/kvalobs/dump.go b/migrations/kvalobs/dump.go new file mode 100644 index 00000000..b93420cc --- /dev/null +++ b/migrations/kvalobs/dump.go @@ -0,0 +1,513 @@ +package kvalobs + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strconv" + "strings" + + // "migrate/lard" + "os" + "path/filepath" + "time" + + "github.com/gocarina/gocsv" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Same timeseries could be in both 'data' and 'text_data' tables +// First of all, why? +// Second, do we care? +// func readDataAndText(label *TSLabel, pool *pgxpool.Pool, config *DumpConfig) Data { +// // Supposed to join text anf number data to single slice +// return nil +// } + +type DumpConfig struct { + BaseConfig +} + +func (config *DumpConfig) Execute(_ []string) error { + config.setup() + + // dump kvalobs + config.Dump("KVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "kvalobs")) + + // dump histkvalobs + config.Dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "histkvalobs")) + + return nil +} + +func (config *DumpConfig) Dump(envvar, path string) { + pool, err := pgxpool.New(context.Background(), os.Getenv(envvar)) + if err != nil { + slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) + return + } + defer pool.Close() + + config.DumpText(pool, path) + config.DumpData(pool, path) +} + +func (config *DumpConfig) DumpText(pool *pgxpool.Pool, path string) { + var labels []TSLabel + + textPath := filepath.Join(path, "text") + if err := os.MkdirAll(textPath, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + labelFile := filepath.Join(path, "text_labels.csv") + if _, err := os.Stat(labelFile); err != nil { + if labels, err = config.dumpLabels(pool, labelFile, getTextLabels); err != nil { + return + } + } else { + if labels, err = readCSVfile[TSLabel](labelFile); err != nil { + return + } + } + + for _, ts := range labels { + if !ts.ShouldBeDumped(config) { + continue + } + + data, err := readTextData(&ts, pool, config) + if err != nil { + continue + } + + filename := filepath.Join(textPath, ts.toFilename()) + file, err := os.Create(filename) + if err != nil { + slog.Error(err.Error()) + continue + } + + slog.Info("Writing text to " + filename) + if err = gocsv.MarshalFile(data, file); err != nil { + slog.Error(err.Error()) + continue + } + } +} + +func (config *DumpConfig) DumpData(pool *pgxpool.Pool, path string) { + var labels []TSLabel + + dataPath := filepath.Join(path, "data") + if err := os.MkdirAll(dataPath, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + labelFile := filepath.Join(path, "data_labels.csv") + if _, err := os.Stat(path); err != nil { + if labels, err = config.dumpLabels(pool, labelFile, getDataLabels); err != nil { + return + } + } else { + if labels, err = readCSVfile[TSLabel](labelFile); err != nil { + return + } + } + + for _, ts := range labels { + if !ts.ShouldBeDumped(config) 
{ + continue + } + + data, err := readData(&ts, pool, config) + if err != nil { + continue + } + + filename := filepath.Join(dataPath, ts.toFilename()) + file, err := os.Create(filename) + if err != nil { + slog.Error(err.Error()) + continue + } + + slog.Info("Writing data to " + filename) + if err = gocsv.MarshalFile(data, file); err != nil { + slog.Error(err.Error()) + continue + } + } + +} + +type LabelDumpFunc = func(pool *pgxpool.Pool, config *DumpConfig) ([]TSLabel, error) + +func (config *DumpConfig) dumpLabels(pool *pgxpool.Pool, path string, fn LabelDumpFunc) ([]TSLabel, error) { + labels, err := fn(pool, config) + if err != nil { + // Error logged inside fn + return nil, err + } + + file, err := os.Create(path) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + slog.Info("Writing timeseries labels to " + path) + if err = gocsv.Marshal(labels, file); err != nil { + slog.Error(err.Error()) + return nil, err + } + + return labels, nil +} + +func (config *DumpConfig) dumpTextTS(pool *pgxpool.Pool) { + timeseries, err := getTextLabels(pool, config) + if err != nil { + // Error logged inside getTextTS + return + } + + if err := os.MkdirAll(config.BaseDir, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + path := filepath.Join(config.BaseDir, "text_timeseries.csv") + file, err := os.Create(path) + if err != nil { + slog.Error(err.Error()) + return + } + + slog.Info("Writing timeseries labels to CSV") + if err = gocsv.Marshal(timeseries, file); err != nil { + slog.Error(err.Error()) + return + } +} + +// This is basically the same as lard.Label (except for ParamCode) +type TSLabel struct { + StationID int32 `db:"stationid"` + TypeID int32 `db:"typeid"` + ParamID int32 `db:"paramid"` + Sensor *int32 `db:"sensor"` + Level *int32 `db:"level"` + // ParamCode string `db:"name,omitempty"` +} + +// Serialize Label to CSV file name +func (ts *TSLabel) toFilename() string { + var sensor, level string + if ts.Sensor != nil { + sensor = fmt.Sprint(ts.Sensor) + } + if ts.Level != nil { + level = fmt.Sprint(ts.Level) + } + return fmt.Sprintf("%v_%v_%v_%v_%v.csv", ts.StationID, ts.TypeID, ts.ParamID, sensor, level) +} + +func parseFilename(s *string) (*int32, error) { + // TODO: probably there is a better way to do this without defining a gazillion functions + if *s == "" { + return nil, nil + } + res, err := strconv.ParseInt(*s, 10, 32) + if err != nil { + return nil, err + } + out := int32(res) + return &out, nil +} + +func pf(s *string) *int32 { + // TODO: probably there is a better way to do this without defining a gazillion functions + if *s == "" { + return nil + } + res := toInt32(*s) + return &res +} + +// Deserialize filename to TSLabel struct +func (ts *TSLabel) fromFilename(filename string) error { + name := strings.TrimSuffix(filename, ".csv") + fields := strings.Split(name, "_") + if len(fields) < 5 { + return errors.New("Too few fields in file name: " + filename) + } + + ptrs := make([]*string, len(fields)) + for i := range ptrs { + ptrs[i] = &fields[i] + } + + converted, err := TryMap(ptrs, parseFilename) + if err != nil { + return err + } + + ts.StationID = *converted[0] + ts.TypeID = *converted[1] + ts.ParamID = *converted[2] + ts.Sensor = converted[3] + ts.Level = converted[4] + + return nil +} + +func LabelFromFilename(filename string) (TSLabel, error) { + name := strings.TrimSuffix(filename, ".csv") + fields := strings.Split(name, "_") + if len(fields) < 5 { + return TSLabel{}, errors.New("Too few fields in file name: " + filename) + } + + 
ptrs := make([]*string, len(fields)) + for i := range ptrs { + ptrs[i] = &fields[i] + } + + converted, err := TryMap(ptrs, parseFilename) + if err != nil { + return TSLabel{}, err + } + + return TSLabel{ + StationID: *converted[0], + TypeID: *converted[1], + ParamID: *converted[2], + Sensor: converted[3], + Level: converted[4], + }, nil +} + +func getTextLabels(pool *pgxpool.Pool, config *DumpConfig) ([]TSLabel, error) { + // OGquery := `SELECT DISTINCT + // stationid, + // typeid, + // paramid, + // 0 AS sensor, + // 0 AS level, + // name AS code + // FROM + // text_data + // LEFT JOIN + // param USING (paramid) + // WHERE + // obstime >= $1 + // TODO: probably don't need this? + // AND obstime <= $2 + // AND name IS NOT NULL + // TODO: do we need this order by? As far as I can see, + // it's used to compare text_data and scalar_data timeseries + // ORDER BY + // stationid, + // typeid, + // paramid, + // level, + // sensor` + + // NOTE: `param` table is empty in histkvalobs + // TODO: We probably don't even need the join, + // because `name` (`param_code`) is not present in our `labels.met`? + // query := `SELECT DISTINCT stationid, typeid, paramid, name FROM text_data + // LEFT JOIN param USING (paramid) + // WHERE name IS NOT NULL + // AND ($1::timestamp IS NULL OR obstime >= $1) + // AND ($2::timestamp IS NULL OR obstime < $2)` + // + // TODO: should sensor/level be NULL or 0 + query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level FROM text_data + WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` + + slog.Info("Querying distinct timeseries labels") + rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + slog.Info("Collecting rows to slice") + tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[TSLabel]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return tsList, nil +} + +func getDataLabels(pool *pgxpool.Pool, config *DumpConfig) ([]TSLabel, error) { + // TODO: not sure about the sensor/level conditions, + // they should never be NULL since they have default values different from NULL? + // TODO: We probably don't even need the join, + // because `name` (`param_code`) is not present in our `labels.met`? 
+ // query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level, name FROM data + // LEFT JOIN param USING (paramid) + // WHERE name IS NOT NUL + // AND sensor IS NOT NULL + // AND level IS NOT NULL + // AND ($1::timestamp IS NULL OR obstime >= $1) + // AND ($2::timestamp IS NULL OR obstime < $2)` + query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data + WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` + + rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[TSLabel]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return tsList, nil +} + +// TODO: not sure what to do with this one +// func joinTS(first, second []TSLabel) + +// Kvalobs observation row +type Obs struct { + Obstime time.Time `db:"obstime"` + Original float64 `db:"original"` + Tbtime time.Time `db:"tbtime"` + Corrected float64 `db:"corrected"` + Controlinfo *string `db:"controlinfo"` + Useinfo *string `db:"useinfo"` + Cfailed *string `db:"cfailed"` +} + +type TextObs struct { + Obstime time.Time `db:"obstime"` + Original string `db:"original"` + Tbtime time.Time `db:"tbtime"` +} + +type Data = []Obs +type Text = []TextObs + +func readTextData(label *TSLabel, pool *pgxpool.Pool, config *DumpConfig) (Text, error) { + // query := ` + // SELECT + // obstime, + // original AS originaltext, + // tbtime + // FROM + // text_data + // WHERE + // stationid = $1 + // AND typeid = $2 + // AND paramid = $3 + // AND obstime >= $4 + // AND obstime <= $5 + // TODO: should we keep these? Maybe obstime is actually useful + // ORDER BY + // stationid, + // obstime` + query := `SELECT obstime, original, tbtime FROM text_data + WHERE stationid = $1 + AND typeid = $2 + AND paramid = $3 + AND ($4::timestamp IS NULL OR obstime >= $4) + AND ($5::timestamp IS NULL OR obstime < $5) + ORDER BY obstime` + + rows, err := pool.Query( + context.TODO(), + query, + label.StationID, + label.TypeID, + label.ParamID, + config.FromTime, + config.ToTime, + ) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + data, err := pgx.CollectRows(rows, pgx.RowToStructByName[TextObs]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return data, nil +} + +func readData(label *TSLabel, pool *pgxpool.Pool, config *DumpConfig) (Data, error) { + // TODO: is the case useful here, we can just check for cfailed = '' in here + // query := `SELECT + // obstime, + // original, + // tbtime, + // CASE + // WHEN original = corrected AND cfailed = '' THEN NULL + // ELSE corrected + // END, + // controlinfo, + // useinfo, + // cfailed + // FROM + // data + // WHERE + // stationid = $1 + // AND typeid = $2 + // AND paramid = $3 + // AND sensor = $4 + // AND level = $5 + // AND obstime >= $6 + // TODO: should we keep these? 
Maybe obstime is actually useful + // ORDER BY + // stationid, + // obstime` + query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed + FROM data + WHERE stationid = $1 + AND typeid = $2 + AND paramid = $3 + AND sensor = $4 + AND level = $5 + AND ($6::timestamp IS NULL OR obstime >= $6) + AND ($7::timestamp IS NULL OR obstime < $7) + ORDER BY obstime` + + rows, err := pool.Query( + context.TODO(), + query, + label.StationID, + label.TypeID, + label.ParamID, + label.Sensor, + label.Level, + config.FromTime, + config.ToTime, + ) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + data, err := pgx.CollectRows(rows, pgx.RowToStructByName[Obs]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return data, nil +} diff --git a/migrations/kvalobs/import.go b/migrations/kvalobs/import.go new file mode 100644 index 00000000..464e6d58 --- /dev/null +++ b/migrations/kvalobs/import.go @@ -0,0 +1,106 @@ +package kvalobs + +import ( + "context" + "fmt" + "log/slog" + "migrate/lard" + "os" + "time" + + // "path/filepath" + + "github.com/jackc/pgx/v5/pgxpool" +) + +type ImportConfig struct { + BaseConfig +} + +func (config *ImportConfig) Execute(_ []string) error { + config.setup() + + pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) + if err != nil { + slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) + } + defer pool.Close() + + return nil +} + +type TextTimeseries struct { + id int32 + obses []TextObs +} + +func (ts *TextTimeseries) Len() int { + return len(ts.obses) +} + +func (ts *TextTimeseries) ID() int32 { + return ts.id +} + +func (ts *TextTimeseries) Obstime(i int) time.Time { + return ts.obses[i].Obstime +} + +func (ts *TextTimeseries) Text(i int) string { + return ts.obses[i].Original +} + +func (config *ImportConfig) ImportText(pool *pgxpool.Pool, path string) error { + dir, err := os.ReadDir(path) + if err != nil { + slog.Error(err.Error()) + return err + } + + var totalRowsInserted int64 + for _, file := range dir { + label, err := LabelFromFilename(file.Name()) + if err != nil { + slog.Error(err.Error()) + continue + } + + if !label.ShouldBeImported(config) { + continue + } + + // TODO: should use lard.Label directly? + tsid, err := lard.GetTimeseriesID(lard.Label(label), *config.FromTime, pool) + if err != nil { + slog.Error(err.Error()) + continue + } + + if !contains(config.Ts, tsid) { + continue + } + + data, err := readCSVfile[TextObs](file.Name()) + if err != nil { + slog.Error(err.Error()) + continue + } + + // TODO: I probably need the interface don't I? + ts := &TextTimeseries{tsid, data} + count, err := lard.InsertNonscalarData(ts, pool, "") + if err != nil { + slog.Error("Failed bulk insertion: " + err.Error()) + continue + } + + totalRowsInserted += count + } + + return nil +} + +func readDataFiles() []TSLabel { + // TODO: + return nil +} diff --git a/migrations/kvalobs/main.go b/migrations/kvalobs/main.go new file mode 100644 index 00000000..0caae1e8 --- /dev/null +++ b/migrations/kvalobs/main.go @@ -0,0 +1,172 @@ +package kvalobs + +import ( + "strings" + "time" +) + +// Kvalobs is composed of two databases +// 1) `kvalobs` for fresh data +// 2) `histkvalobs` for data older than +// +// Both contain the same tables: +// - `algorithms`: empty (???) - stores procedure info for QC checks +// - `checks`: empty (???) 
+// - `data`: stores numerical observations, associated metadata, and QC info +// +// Column | Type | Collation | Nullable | Default +// -------------+-----------------------------+-----------+----------+---------------------------- +// stationid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// original | double precision | | not null | +// paramid | integer | | not null | +// tbtime | timestamp without time zone | | not null | +// typeid | integer | | not null | +// sensor | character(1) | | | '0'::bpchar +// level | integer | | | 0 +// corrected | double precision | | not null | +// controlinfo | character(16) | | | '0000000000000000'::bpchar +// useinfo | character(16) | | | '0000000000000000'::bpchar +// cfailed | text | | | +// +// - `default_missing`: +// - `default_missing_values`: +// +// - `model`: +// Column | Type | Collation | Nullable | Default +// ---------+---------+-----------+----------+--------- +// modelid | integer | | not null | +// name | text | | | +// comment | text | | | +// +// - `model_data`: +// Column | Type | Collation | Nullable | Default +// -----------+-----------------------------+-----------+----------+--------- +// stationid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// paramid | integer | | not null | +// level | integer | | not null | +// modelid | integer | | not null | +// original | double precision | | | +// +// - `param`: part of stinfosys `param` table +// Column | Type | Collation | Nullable | Default +// -------------+---------+-----------+----------+--------- +// paramid | integer | | not null | +// name | text | | not null | +// description | text | | | +// unit | text | | | +// level_scale | integer | | | 0 +// comment | text | | | +// scalar | boolean | | | true +// +// - `pdata`: same as `data` without the `original` column and all `paramid` null??? +// - `station`: station metadata such as (lat, lon, height, name, wmonr, etc) +// - `station_metadata`: this one seems to map well to our `labels.met`? 
+// Problem is `typeid`, `sensor`, and `level` are always NULL +// +// - `text_data`: Similar to `data`, but without QC info nor sensor/level +// +// Column | Type | Collation | Nullable | Default +// -----------+-----------------------------+-----------+----------+--------- +// stationid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// original | text | | not null | +// paramid | integer | | not null | +// tbtime | timestamp without time zone | | not null | +// typeid | integer | | not null | +// +// In `histkvalobs` only data tables seem to be non-empty +// +// IMPORTANT: considerations for migrations to LARD +// - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table +// - (sensor, level) can be NULL, while in Kvalobs they have default values (0,0) +// => POSSIBLE INCONSISTENCY when importing to LARD +// - Timestamps are UTC +// - Kvalobs doesn't have the concept of timeseries ID, +// instead there is a sequential ID associated with each observation row + +var NULL_VALUES []float64 = []float64{-34767, -34766} + +type timespan struct { + fromtime time.Time + totime time.Time +} + +type Kvalobs struct { + Hosts []string + Ports []string + DBs []string + Usernames []string + Passwords []string +} + +// TODO: should we use this one as default or process all times +var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) + +type BaseConfig struct { + BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` + FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` + ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` + TsCmd string `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` + StationsCmd string `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` + TypeIdsCmd string `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` + ParamIdsCmd string `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` + SensorsCmd string `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` + LevelsCmd string `long:"level" description:"Optional comma separated list of levels. By default all available levels are processed"` + + Ts []int32 // Why did I set this one as int64? 
+ Stations []int32 + TypeIds []int32 + ParamIds []int32 + Sensors []int32 + Levels []int32 +} + +func (config *BaseConfig) setup() { + if config.TsCmd != "" { + config.Ts = Map(strings.Split(config.TsCmd, ","), toInt32) + } + if config.StationsCmd != "" { + config.Stations = Map(strings.Split(config.StationsCmd, ","), toInt32) + } + if config.TypeIdsCmd != "" { + config.TypeIds = Map(strings.Split(config.TypeIdsCmd, ","), toInt32) + } + if config.ParamIdsCmd != "" { + config.ParamIds = Map(strings.Split(config.ParamIdsCmd, ","), toInt32) + } + if config.SensorsCmd != "" { + config.Sensors = Map(strings.Split(config.SensorsCmd, ","), toInt32) + } + if config.LevelsCmd != "" { + config.Levels = Map(strings.Split(config.LevelsCmd, ","), toInt32) + } +} + +func (ts *TSLabel) ShouldBeDumped(config *DumpConfig) bool { + // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || + return contains(config.Stations, ts.StationID) || + contains(config.TypeIds, ts.TypeID) || + contains(config.ParamIds, ts.ParamID) || + // TODO: these two should never be null anyway + nullableContains(config.Sensors, ts.Sensor) || + nullableContains(config.Levels, ts.Level) +} + +func (ts *TSLabel) ShouldBeImported(config *ImportConfig) bool { + // TODO: there's no need to get the tsid if the other parameters don't match + // So extract the first condition + // return contains(config.Ts, tsid) || + return contains(config.Stations, ts.StationID) || + contains(config.TypeIds, ts.TypeID) || + contains(config.ParamIds, ts.ParamID) || + // TODO: these two should never be null anyway + nullableContains(config.Sensors, ts.Sensor) || + nullableContains(config.Levels, ts.Level) +} + +type Cmd struct { + Dump DumpConfig `command:"dump" description:"Dump tables from Kvalobs to CSV"` + Import ImportConfig `command:"import" description:"Import CSV file dumped from Kvalobs"` +} diff --git a/migrations/kvalobs/utils.go b/migrations/kvalobs/utils.go new file mode 100644 index 00000000..1a233360 --- /dev/null +++ b/migrations/kvalobs/utils.go @@ -0,0 +1,77 @@ +package kvalobs + +import ( + "log/slog" + "os" + "slices" + "strconv" + + "github.com/gocarina/gocsv" +) + +// Loads a CSV file where records (lines) are described by type T +func readCSVfile[T any](filename string) ([]T, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + defer file.Close() + + // TODO: maybe I should preallocate slice size if I can? + // Does UnmarshalFile allocate? 
+ // labels := make([]T, 0, size) + var labels []T + err = gocsv.UnmarshalFile(file, &labels) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return labels, nil +} + +func toInt32(s string) int32 { + res, err := strconv.ParseInt(s, 10, 32) + if err != nil { + // Panic is fine here, because we use this function only at startup + panic("Could not parse to int") + } + return int32(res) +} + +func Map[T, V any](ts []T, fn func(T) V) []V { + result := make([]V, len(ts)) + for i, t := range ts { + result[i] = fn(t) + } + return result +} + +// Similar to Map, but bails immediately if an error occurs +func TryMap[T, V any](ts []T, fn func(T) (V, error)) ([]V, error) { + result := make([]V, len(ts)) + for i, t := range ts { + temp, err := fn(t) + if err != nil { + return nil, err + } + result[i] = temp + } + return result, nil +} + +func contains[T comparable](s []T, v T) bool { + if s == nil { + return true + } + return slices.Contains(s, v) +} + +// Returns true if the slice is empty or the value is null +func nullableContains[T comparable](s []T, v *T) bool { + if s == nil || v == nil { + return true + } + return slices.Contains(s, *v) +} From 761b9e064ad101fd2b0e9c3abe5d9ca77b4c7d3d Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 25 Nov 2024 16:11:21 +0100 Subject: [PATCH 02/67] Update directory structure --- migrations/kvalobs/db/csv.go | 83 +++++ migrations/kvalobs/db/labels.go | 62 ++++ migrations/kvalobs/db/main.go | 35 ++ migrations/kvalobs/dump.go | 513 ---------------------------- migrations/kvalobs/dump/data.go | 154 +++++++++ migrations/kvalobs/dump/labels.go | 35 ++ migrations/kvalobs/dump/main.go | 70 ++++ migrations/kvalobs/dump/text.go | 198 +++++++++++ migrations/kvalobs/import.go | 106 ------ migrations/kvalobs/import/data.go | 8 + migrations/kvalobs/import/import.go | 49 +++ migrations/kvalobs/import/text.go | 58 ++++ migrations/kvalobs/kvalobs_test.go | 1 + migrations/kvalobs/main.go | 103 ++---- migrations/kvalobs/utils.go | 77 ----- migrations/utils/utils.go | 46 +++ 16 files changed, 826 insertions(+), 772 deletions(-) create mode 100644 migrations/kvalobs/db/csv.go create mode 100644 migrations/kvalobs/db/labels.go create mode 100644 migrations/kvalobs/db/main.go delete mode 100644 migrations/kvalobs/dump.go create mode 100644 migrations/kvalobs/dump/data.go create mode 100644 migrations/kvalobs/dump/labels.go create mode 100644 migrations/kvalobs/dump/main.go create mode 100644 migrations/kvalobs/dump/text.go delete mode 100644 migrations/kvalobs/import.go create mode 100644 migrations/kvalobs/import/data.go create mode 100644 migrations/kvalobs/import/import.go create mode 100644 migrations/kvalobs/import/text.go create mode 100644 migrations/kvalobs/kvalobs_test.go delete mode 100644 migrations/kvalobs/utils.go diff --git a/migrations/kvalobs/db/csv.go b/migrations/kvalobs/db/csv.go new file mode 100644 index 00000000..042cea12 --- /dev/null +++ b/migrations/kvalobs/db/csv.go @@ -0,0 +1,83 @@ +package db + +import ( + "bufio" + "log/slog" + "migrate/lard" + "os" + + "github.com/gocarina/gocsv" +) + +func ReadLabelCSV(filename string) (labels []*lard.Label, err error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + defer file.Close() + + // TODO: maybe I should preallocate slice size if I can? 
+ err = gocsv.UnmarshalFile(file, labels) + return labels, nil +} + +func ReadDataCSV(tsid int32, filename string) ([][]any, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + defer file.Close() + + reader := bufio.NewScanner(file) + + // TODO: maybe I should preallocate slice size if I can? + var data [][]any + for reader.Scan() { + var obs lard.DataObs + + err = gocsv.UnmarshalString(reader.Text(), &obs) + if err != nil { + return nil, err + } + + // Kvalobs does not have IDs so we have to bootstrap it here + obs.Id = tsid + + row := obs.ToRow() + data = append(data, row) + } + + return data, nil +} + +func ReadTextCSV(tsid int32, filename string) ([][]any, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + defer file.Close() + + reader := bufio.NewScanner(file) + + // TODO: maybe I should preallocate slice size if I can? + var data [][]any + for reader.Scan() { + var obs lard.TextObs + + err = gocsv.UnmarshalString(reader.Text(), &obs) + if err != nil { + return nil, err + } + + // Kvalobs does not have IDs so we have to bootstrap it here + obs.Id = tsid + + row := obs.ToRow() + data = append(data, row) + } + + return data, nil +} diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go new file mode 100644 index 00000000..273086d9 --- /dev/null +++ b/migrations/kvalobs/db/labels.go @@ -0,0 +1,62 @@ +package db + +import ( + "errors" + "fmt" + "migrate/lard" + "migrate/utils" + "strconv" + "strings" +) + +// Serialize lard.Label to CSV file name +func LabelToFilename(ts *lard.Label) string { + var sensor, level string + if ts.Sensor != nil { + sensor = fmt.Sprint(ts.Sensor) + } + if ts.Level != nil { + level = fmt.Sprint(ts.Level) + } + return fmt.Sprintf("%v_%v_%v_%v_%v.csv", ts.StationID, ts.TypeID, ts.ParamID, sensor, level) +} + +func parseFilename(s *string) (*int32, error) { + // TODO: probably there is a better way to do this without defining a gazillion functions + if *s == "" { + return nil, nil + } + res, err := strconv.ParseInt(*s, 10, 32) + if err != nil { + return nil, err + } + out := int32(res) + return &out, nil +} + +// Deserialize filename to lard.Label +func LabelFromFilename(filename string) (*lard.Label, error) { + name := strings.TrimSuffix(filename, ".csv") + fields := strings.Split(name, "_") + if len(fields) < 5 { + return nil, errors.New("Too few fields in file name: " + filename) + } + + ptrs := make([]*string, len(fields)) + for i := range ptrs { + ptrs[i] = &fields[i] + } + + converted, err := utils.TryMap(ptrs, parseFilename) + if err != nil { + return nil, err + } + + return &lard.Label{ + StationID: *converted[0], + TypeID: *converted[1], + ParamID: *converted[2], + Sensor: converted[3], + Level: converted[4], + }, nil +} diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go new file mode 100644 index 00000000..9268c53b --- /dev/null +++ b/migrations/kvalobs/db/main.go @@ -0,0 +1,35 @@ +package db + +import ( + "time" +) + +// This is basically the same as lard.Label (except for ParamCode) +// type TSLabel struct { +// StationID int32 `db:"stationid"` +// TypeID int32 `db:"typeid"` +// ParamID int32 `db:"paramid"` +// Sensor *int32 `db:"sensor"` +// Level *int32 `db:"level"` +// // ParamCode string `db:"name,omitempty"` +// } + +// Kvalobs observation row +type DataObs struct { + Obstime time.Time `db:"obstime"` + Original float64 `db:"original"` + Tbtime time.Time `db:"tbtime"` + Corrected 
float64 `db:"corrected"` + Controlinfo *string `db:"controlinfo"` + Useinfo *string `db:"useinfo"` + Cfailed *string `db:"cfailed"` +} + +type TextObs struct { + Obstime time.Time `db:"obstime"` + Original string `db:"original"` + Tbtime time.Time `db:"tbtime"` +} + +type Data = []*DataObs +type Text = []*TextObs diff --git a/migrations/kvalobs/dump.go b/migrations/kvalobs/dump.go deleted file mode 100644 index b93420cc..00000000 --- a/migrations/kvalobs/dump.go +++ /dev/null @@ -1,513 +0,0 @@ -package kvalobs - -import ( - "context" - "errors" - "fmt" - "log/slog" - "strconv" - "strings" - - // "migrate/lard" - "os" - "path/filepath" - "time" - - "github.com/gocarina/gocsv" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" -) - -// Same timeseries could be in both 'data' and 'text_data' tables -// First of all, why? -// Second, do we care? -// func readDataAndText(label *TSLabel, pool *pgxpool.Pool, config *DumpConfig) Data { -// // Supposed to join text anf number data to single slice -// return nil -// } - -type DumpConfig struct { - BaseConfig -} - -func (config *DumpConfig) Execute(_ []string) error { - config.setup() - - // dump kvalobs - config.Dump("KVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "kvalobs")) - - // dump histkvalobs - config.Dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "histkvalobs")) - - return nil -} - -func (config *DumpConfig) Dump(envvar, path string) { - pool, err := pgxpool.New(context.Background(), os.Getenv(envvar)) - if err != nil { - slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) - return - } - defer pool.Close() - - config.DumpText(pool, path) - config.DumpData(pool, path) -} - -func (config *DumpConfig) DumpText(pool *pgxpool.Pool, path string) { - var labels []TSLabel - - textPath := filepath.Join(path, "text") - if err := os.MkdirAll(textPath, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - labelFile := filepath.Join(path, "text_labels.csv") - if _, err := os.Stat(labelFile); err != nil { - if labels, err = config.dumpLabels(pool, labelFile, getTextLabels); err != nil { - return - } - } else { - if labels, err = readCSVfile[TSLabel](labelFile); err != nil { - return - } - } - - for _, ts := range labels { - if !ts.ShouldBeDumped(config) { - continue - } - - data, err := readTextData(&ts, pool, config) - if err != nil { - continue - } - - filename := filepath.Join(textPath, ts.toFilename()) - file, err := os.Create(filename) - if err != nil { - slog.Error(err.Error()) - continue - } - - slog.Info("Writing text to " + filename) - if err = gocsv.MarshalFile(data, file); err != nil { - slog.Error(err.Error()) - continue - } - } -} - -func (config *DumpConfig) DumpData(pool *pgxpool.Pool, path string) { - var labels []TSLabel - - dataPath := filepath.Join(path, "data") - if err := os.MkdirAll(dataPath, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - labelFile := filepath.Join(path, "data_labels.csv") - if _, err := os.Stat(path); err != nil { - if labels, err = config.dumpLabels(pool, labelFile, getDataLabels); err != nil { - return - } - } else { - if labels, err = readCSVfile[TSLabel](labelFile); err != nil { - return - } - } - - for _, ts := range labels { - if !ts.ShouldBeDumped(config) { - continue - } - - data, err := readData(&ts, pool, config) - if err != nil { - continue - } - - filename := filepath.Join(dataPath, ts.toFilename()) - file, err := os.Create(filename) - if err != nil { - slog.Error(err.Error()) - continue - } - - slog.Info("Writing data to " 
+ filename) - if err = gocsv.MarshalFile(data, file); err != nil { - slog.Error(err.Error()) - continue - } - } - -} - -type LabelDumpFunc = func(pool *pgxpool.Pool, config *DumpConfig) ([]TSLabel, error) - -func (config *DumpConfig) dumpLabels(pool *pgxpool.Pool, path string, fn LabelDumpFunc) ([]TSLabel, error) { - labels, err := fn(pool, config) - if err != nil { - // Error logged inside fn - return nil, err - } - - file, err := os.Create(path) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - slog.Info("Writing timeseries labels to " + path) - if err = gocsv.Marshal(labels, file); err != nil { - slog.Error(err.Error()) - return nil, err - } - - return labels, nil -} - -func (config *DumpConfig) dumpTextTS(pool *pgxpool.Pool) { - timeseries, err := getTextLabels(pool, config) - if err != nil { - // Error logged inside getTextTS - return - } - - if err := os.MkdirAll(config.BaseDir, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - path := filepath.Join(config.BaseDir, "text_timeseries.csv") - file, err := os.Create(path) - if err != nil { - slog.Error(err.Error()) - return - } - - slog.Info("Writing timeseries labels to CSV") - if err = gocsv.Marshal(timeseries, file); err != nil { - slog.Error(err.Error()) - return - } -} - -// This is basically the same as lard.Label (except for ParamCode) -type TSLabel struct { - StationID int32 `db:"stationid"` - TypeID int32 `db:"typeid"` - ParamID int32 `db:"paramid"` - Sensor *int32 `db:"sensor"` - Level *int32 `db:"level"` - // ParamCode string `db:"name,omitempty"` -} - -// Serialize Label to CSV file name -func (ts *TSLabel) toFilename() string { - var sensor, level string - if ts.Sensor != nil { - sensor = fmt.Sprint(ts.Sensor) - } - if ts.Level != nil { - level = fmt.Sprint(ts.Level) - } - return fmt.Sprintf("%v_%v_%v_%v_%v.csv", ts.StationID, ts.TypeID, ts.ParamID, sensor, level) -} - -func parseFilename(s *string) (*int32, error) { - // TODO: probably there is a better way to do this without defining a gazillion functions - if *s == "" { - return nil, nil - } - res, err := strconv.ParseInt(*s, 10, 32) - if err != nil { - return nil, err - } - out := int32(res) - return &out, nil -} - -func pf(s *string) *int32 { - // TODO: probably there is a better way to do this without defining a gazillion functions - if *s == "" { - return nil - } - res := toInt32(*s) - return &res -} - -// Deserialize filename to TSLabel struct -func (ts *TSLabel) fromFilename(filename string) error { - name := strings.TrimSuffix(filename, ".csv") - fields := strings.Split(name, "_") - if len(fields) < 5 { - return errors.New("Too few fields in file name: " + filename) - } - - ptrs := make([]*string, len(fields)) - for i := range ptrs { - ptrs[i] = &fields[i] - } - - converted, err := TryMap(ptrs, parseFilename) - if err != nil { - return err - } - - ts.StationID = *converted[0] - ts.TypeID = *converted[1] - ts.ParamID = *converted[2] - ts.Sensor = converted[3] - ts.Level = converted[4] - - return nil -} - -func LabelFromFilename(filename string) (TSLabel, error) { - name := strings.TrimSuffix(filename, ".csv") - fields := strings.Split(name, "_") - if len(fields) < 5 { - return TSLabel{}, errors.New("Too few fields in file name: " + filename) - } - - ptrs := make([]*string, len(fields)) - for i := range ptrs { - ptrs[i] = &fields[i] - } - - converted, err := TryMap(ptrs, parseFilename) - if err != nil { - return TSLabel{}, err - } - - return TSLabel{ - StationID: *converted[0], - TypeID: *converted[1], - ParamID: *converted[2], 
- Sensor: converted[3], - Level: converted[4], - }, nil -} - -func getTextLabels(pool *pgxpool.Pool, config *DumpConfig) ([]TSLabel, error) { - // OGquery := `SELECT DISTINCT - // stationid, - // typeid, - // paramid, - // 0 AS sensor, - // 0 AS level, - // name AS code - // FROM - // text_data - // LEFT JOIN - // param USING (paramid) - // WHERE - // obstime >= $1 - // TODO: probably don't need this? - // AND obstime <= $2 - // AND name IS NOT NULL - // TODO: do we need this order by? As far as I can see, - // it's used to compare text_data and scalar_data timeseries - // ORDER BY - // stationid, - // typeid, - // paramid, - // level, - // sensor` - - // NOTE: `param` table is empty in histkvalobs - // TODO: We probably don't even need the join, - // because `name` (`param_code`) is not present in our `labels.met`? - // query := `SELECT DISTINCT stationid, typeid, paramid, name FROM text_data - // LEFT JOIN param USING (paramid) - // WHERE name IS NOT NULL - // AND ($1::timestamp IS NULL OR obstime >= $1) - // AND ($2::timestamp IS NULL OR obstime < $2)` - // - // TODO: should sensor/level be NULL or 0 - query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level FROM text_data - WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` - - slog.Info("Querying distinct timeseries labels") - rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - slog.Info("Collecting rows to slice") - tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[TSLabel]) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return tsList, nil -} - -func getDataLabels(pool *pgxpool.Pool, config *DumpConfig) ([]TSLabel, error) { - // TODO: not sure about the sensor/level conditions, - // they should never be NULL since they have default values different from NULL? - // TODO: We probably don't even need the join, - // because `name` (`param_code`) is not present in our `labels.met`? 
- // query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level, name FROM data - // LEFT JOIN param USING (paramid) - // WHERE name IS NOT NUL - // AND sensor IS NOT NULL - // AND level IS NOT NULL - // AND ($1::timestamp IS NULL OR obstime >= $1) - // AND ($2::timestamp IS NULL OR obstime < $2)` - query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data - WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` - - rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[TSLabel]) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return tsList, nil -} - -// TODO: not sure what to do with this one -// func joinTS(first, second []TSLabel) - -// Kvalobs observation row -type Obs struct { - Obstime time.Time `db:"obstime"` - Original float64 `db:"original"` - Tbtime time.Time `db:"tbtime"` - Corrected float64 `db:"corrected"` - Controlinfo *string `db:"controlinfo"` - Useinfo *string `db:"useinfo"` - Cfailed *string `db:"cfailed"` -} - -type TextObs struct { - Obstime time.Time `db:"obstime"` - Original string `db:"original"` - Tbtime time.Time `db:"tbtime"` -} - -type Data = []Obs -type Text = []TextObs - -func readTextData(label *TSLabel, pool *pgxpool.Pool, config *DumpConfig) (Text, error) { - // query := ` - // SELECT - // obstime, - // original AS originaltext, - // tbtime - // FROM - // text_data - // WHERE - // stationid = $1 - // AND typeid = $2 - // AND paramid = $3 - // AND obstime >= $4 - // AND obstime <= $5 - // TODO: should we keep these? Maybe obstime is actually useful - // ORDER BY - // stationid, - // obstime` - query := `SELECT obstime, original, tbtime FROM text_data - WHERE stationid = $1 - AND typeid = $2 - AND paramid = $3 - AND ($4::timestamp IS NULL OR obstime >= $4) - AND ($5::timestamp IS NULL OR obstime < $5) - ORDER BY obstime` - - rows, err := pool.Query( - context.TODO(), - query, - label.StationID, - label.TypeID, - label.ParamID, - config.FromTime, - config.ToTime, - ) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - data, err := pgx.CollectRows(rows, pgx.RowToStructByName[TextObs]) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return data, nil -} - -func readData(label *TSLabel, pool *pgxpool.Pool, config *DumpConfig) (Data, error) { - // TODO: is the case useful here, we can just check for cfailed = '' in here - // query := `SELECT - // obstime, - // original, - // tbtime, - // CASE - // WHEN original = corrected AND cfailed = '' THEN NULL - // ELSE corrected - // END, - // controlinfo, - // useinfo, - // cfailed - // FROM - // data - // WHERE - // stationid = $1 - // AND typeid = $2 - // AND paramid = $3 - // AND sensor = $4 - // AND level = $5 - // AND obstime >= $6 - // TODO: should we keep these? 
Maybe obstime is actually useful - // ORDER BY - // stationid, - // obstime` - query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed - FROM data - WHERE stationid = $1 - AND typeid = $2 - AND paramid = $3 - AND sensor = $4 - AND level = $5 - AND ($6::timestamp IS NULL OR obstime >= $6) - AND ($7::timestamp IS NULL OR obstime < $7) - ORDER BY obstime` - - rows, err := pool.Query( - context.TODO(), - query, - label.StationID, - label.TypeID, - label.ParamID, - label.Sensor, - label.Level, - config.FromTime, - config.ToTime, - ) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - data, err := pgx.CollectRows(rows, pgx.RowToStructByName[Obs]) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return data, nil -} diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go new file mode 100644 index 00000000..eb735051 --- /dev/null +++ b/migrations/kvalobs/dump/data.go @@ -0,0 +1,154 @@ +package dump + +import ( + "context" + "log/slog" + "migrate/kvalobs/db" + "migrate/lard" + "os" + "path/filepath" + + "github.com/gocarina/gocsv" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +const DATA_LABEL_CSV string = "data_labels.csv" + +func (config *Config) DumpData(outpath string, pool *pgxpool.Pool) { + var labels []*lard.Label + + dataPath := filepath.Join(outpath, "data") + if err := os.MkdirAll(dataPath, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + labelFile := filepath.Join(outpath, DATA_LABEL_CSV) + if _, err := os.Stat(outpath); err != nil { + if labels, err = dumpLabels(pool, labelFile, getDataLabels, config); err != nil { + return + } + } else { + if labels, err = db.ReadLabelCSV(labelFile); err != nil { + return + } + } + + for _, ts := range labels { + if !config.ShouldDumpLabel(ts) { + continue + } + + data, err := readData(ts, pool, config) + if err != nil { + continue + } + + filename := filepath.Join(dataPath, db.LabelToFilename(ts)) + file, err := os.Create(filename) + if err != nil { + slog.Error(err.Error()) + continue + } + + slog.Info("Writing data to " + filename) + if err = gocsv.MarshalFile(data, file); err != nil { + slog.Error(err.Error()) + continue + } + } +} + +func getDataLabels(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) { + // TODO: not sure about the sensor/level conditions, + // they should never be NULL since they have default values different from NULL? + // TODO: We probably don't even need the join, + // because `name` (`param_code`) is not present in our `labels.met`? 
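+ // NOTE: in the `data` table `sensor` is stored as character(1) with default '0'
+ // (see the schema overview in kvalobs/main.go), so it is cast with `sensor::int`
+ // below to match the *int32 Sensor field of the label struct.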
+ // query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level, name FROM data + // LEFT JOIN param USING (paramid) + // WHERE name IS NOT NUL + // AND sensor IS NOT NULL + // AND level IS NOT NULL + // AND ($1::timestamp IS NULL OR obstime >= $1) + // AND ($2::timestamp IS NULL OR obstime < $2)` + query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data + WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` + + rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[*lard.Label]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return tsList, nil +} + +func readData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Data, error) { + // TODO: is the case useful here, we can just check for cfailed = '' in here + // query := `SELECT + // obstime, + // original, + // tbtime, + // CASE + // WHEN original = corrected AND cfailed = '' THEN NULL + // ELSE corrected + // END, + // controlinfo, + // useinfo, + // cfailed + // FROM + // data + // WHERE + // stationid = $1 + // AND typeid = $2 + // AND paramid = $3 + // AND sensor = $4 + // AND level = $5 + // AND obstime >= $6 + // TODO: should we keep these? Maybe obstime is actually useful + // ORDER BY + // stationid, + // obstime` + query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed + FROM data + WHERE stationid = $1 + AND typeid = $2 + AND paramid = $3 + AND sensor = $4 + AND level = $5 + AND ($6::timestamp IS NULL OR obstime >= $6) + AND ($7::timestamp IS NULL OR obstime < $7) + ORDER BY + stationid, obstime` + + rows, err := pool.Query( + context.TODO(), + query, + label.StationID, + label.TypeID, + label.ParamID, + label.Sensor, + label.Level, + config.FromTime, + config.ToTime, + ) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + data, err := pgx.CollectRows(rows, pgx.RowToStructByName[*db.DataObs]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return data, nil +} diff --git a/migrations/kvalobs/dump/labels.go b/migrations/kvalobs/dump/labels.go new file mode 100644 index 00000000..c6d313f0 --- /dev/null +++ b/migrations/kvalobs/dump/labels.go @@ -0,0 +1,35 @@ +package dump + +import ( + "log/slog" + "migrate/lard" + "os" + + "github.com/gocarina/gocsv" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Function used to du +type LabelDumpFunc = func(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) + +func dumpLabels(pool *pgxpool.Pool, path string, fn LabelDumpFunc, config *Config) ([]*lard.Label, error) { + labels, err := fn(pool, config) + if err != nil { + // Error logged inside fn + return nil, err + } + + file, err := os.Create(path) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + slog.Info("Writing timeseries labels to " + path) + if err = gocsv.Marshal(labels, file); err != nil { + slog.Error(err.Error()) + return nil, err + } + + return labels, nil +} diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go new file mode 100644 index 00000000..6eda54c1 --- /dev/null +++ b/migrations/kvalobs/dump/main.go @@ -0,0 +1,70 @@ +package dump + +import ( + "context" + "fmt" + "log/slog" + "os" + "path/filepath" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/lard" + "migrate/utils" +) + +// Same timeseries could be in both 'data' and 'text_data' 
tables +// First of all, why? +// Second, do we care? +// func readDataAndText(label *lard.Label, pool *pgxpool.Pool, config *DumpConfig) Data { +// // Supposed to join text anf number data to single slice +// return nil +// } +// TODO: not sure what to do with this one +// func joinTS(first, second []lard.Label) + +type Config struct { + BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` + FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` + ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` + // Ts []int32 `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` + Stations []int32 `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` + TypeIds []int32 `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` + ParamIds []int32 `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` + Sensors []int32 `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` + Levels []int32 `long:"level" description:"Optional comma separated list of levels. By default all available levels are processed"` +} + +func (config *Config) ShouldDumpLabel(label *lard.Label) bool { + // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || + return utils.Contains(config.Stations, label.StationID) || + utils.Contains(config.TypeIds, label.TypeID) || + utils.Contains(config.ParamIds, label.ParamID) || + // TODO: these two should never be null anyway + utils.NullableContains(config.Sensors, label.Sensor) || + utils.NullableContains(config.Levels, label.Level) +} + +func (config *Config) Execute(_ []string) error { + // dump kvalobs + config.dump("KVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "kvalobs")) + + // dump histkvalobs + // TODO: maybe it's worth adding a separate flag? 
+ config.dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "histkvalobs")) + + return nil +} + +func (config *Config) dump(envvar, path string) { + pool, err := pgxpool.New(context.Background(), os.Getenv(envvar)) + if err != nil { + slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) + return + } + defer pool.Close() + + dumpText(path, pool, config) + config.dumpData(path, pool) +} diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go new file mode 100644 index 00000000..8137da3c --- /dev/null +++ b/migrations/kvalobs/dump/text.go @@ -0,0 +1,198 @@ +package dump + +import ( + "context" + "log/slog" + "migrate/kvalobs/db" + "migrate/lard" + "os" + "path/filepath" + + "github.com/gocarina/gocsv" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +func DumpText(path string, pool *pgxpool.Pool, config *Config) { + var labels []*lard.Label + + textPath := filepath.Join(path, "text") + if err := os.MkdirAll(textPath, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + labelFile := filepath.Join(path, "labels.csv") + if _, err := os.Stat(labelFile); err != nil { + if labels, err = dumpLabels(pool, labelFile, getTextLabels, config); err != nil { + return + } + } else { + if labels, err = db.ReadLabelCSV(labelFile); err != nil { + return + } + } + + for _, ts := range labels { + if !config.ShouldDumpLabel(ts) { + continue + } + + // TODO: Dump per station? Not strictly necessary? But makes it more organized? + stationDir := filepath.Join(textPath, string(ts.StationID)) + if err := os.MkdirAll(stationDir, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + data, err := readTextData(ts, pool, config) + if err != nil { + continue + } + + filename := filepath.Join(textPath, string(ts.StationID), db.LabelToFilename(ts)) + file, err := os.Create(filename) + if err != nil { + slog.Error(err.Error()) + continue + } + + slog.Info("Writing text to " + filename) + if err = gocsv.MarshalFile(data, file); err != nil { + slog.Error(err.Error()) + continue + } + } +} + +func (config *Config) dumpTextTS(pool *pgxpool.Pool) { + timeseries, err := getTextLabels(pool, config) + if err != nil { + // Error logged inside getTextTS + return + } + + if err := os.MkdirAll(config.BaseDir, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + path := filepath.Join(config.BaseDir, "text_timeseries.csv") + file, err := os.Create(path) + if err != nil { + slog.Error(err.Error()) + return + } + + slog.Info("Writing timeseries labels to CSV") + if err = gocsv.Marshal(timeseries, file); err != nil { + slog.Error(err.Error()) + return + } +} + +func getTextLabels(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) { + // OGquery := `SELECT DISTINCT + // stationid, + // typeid, + // paramid, + // 0 AS sensor, + // 0 AS level, + // name AS code + // FROM + // text_data + // LEFT JOIN + // param USING (paramid) + // WHERE + // obstime >= $1 + // TODO: probably don't need this? + // AND obstime <= $2 + // AND name IS NOT NULL + // TODO: do we need this order by? As far as I can see, + // it's used to compare text_data and scalar_data timeseries + // ORDER BY + // stationid, + // typeid, + // paramid, + // level, + // sensor` + + // NOTE: `param` table is empty in histkvalobs + // TODO: We probably don't even need the join, + // because `name` (`param_code`) is not present in our `labels.met`? 
+ // query := `SELECT DISTINCT stationid, typeid, paramid, name FROM text_data + // LEFT JOIN param USING (paramid) + // WHERE name IS NOT NULL + // AND ($1::timestamp IS NULL OR obstime >= $1) + // AND ($2::timestamp IS NULL OR obstime < $2)` + // + // TODO: should sensor/level be NULL or 0 + query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level FROM text_data + WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` + + slog.Info("Querying distinct timeseries labels") + rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + slog.Info("Collecting rows to slice") + tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[*lard.Label]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return tsList, nil +} + +func readTextData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Text, error) { + // query := ` + // SELECT + // obstime, + // original AS originaltext, + // tbtime + // FROM + // text_data + // WHERE + // stationid = $1 + // AND typeid = $2 + // AND paramid = $3 + // AND obstime >= $4 + // AND obstime <= $5 + // TODO: should we keep these? Maybe obstime is actually useful + // ORDER BY + // stationid, + // obstime` + query := `SELECT obstime, original, tbtime FROM text_data + WHERE stationid = $1 + AND typeid = $2 + AND paramid = $3 + AND ($4::timestamp IS NULL OR obstime >= $4) + AND ($5::timestamp IS NULL OR obstime < $5) + ORDER BY + stationid, obstime` + + rows, err := pool.Query( + context.TODO(), + query, + label.StationID, + label.TypeID, + label.ParamID, + config.FromTime, + config.ToTime, + ) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + data, err := pgx.CollectRows(rows, pgx.RowToStructByName[*db.TextObs]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return data, nil +} diff --git a/migrations/kvalobs/import.go b/migrations/kvalobs/import.go deleted file mode 100644 index 464e6d58..00000000 --- a/migrations/kvalobs/import.go +++ /dev/null @@ -1,106 +0,0 @@ -package kvalobs - -import ( - "context" - "fmt" - "log/slog" - "migrate/lard" - "os" - "time" - - // "path/filepath" - - "github.com/jackc/pgx/v5/pgxpool" -) - -type ImportConfig struct { - BaseConfig -} - -func (config *ImportConfig) Execute(_ []string) error { - config.setup() - - pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) - if err != nil { - slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) - } - defer pool.Close() - - return nil -} - -type TextTimeseries struct { - id int32 - obses []TextObs -} - -func (ts *TextTimeseries) Len() int { - return len(ts.obses) -} - -func (ts *TextTimeseries) ID() int32 { - return ts.id -} - -func (ts *TextTimeseries) Obstime(i int) time.Time { - return ts.obses[i].Obstime -} - -func (ts *TextTimeseries) Text(i int) string { - return ts.obses[i].Original -} - -func (config *ImportConfig) ImportText(pool *pgxpool.Pool, path string) error { - dir, err := os.ReadDir(path) - if err != nil { - slog.Error(err.Error()) - return err - } - - var totalRowsInserted int64 - for _, file := range dir { - label, err := LabelFromFilename(file.Name()) - if err != nil { - slog.Error(err.Error()) - continue - } - - if !label.ShouldBeImported(config) { - continue - } - - // TODO: should use lard.Label directly? 
- tsid, err := lard.GetTimeseriesID(lard.Label(label), *config.FromTime, pool) - if err != nil { - slog.Error(err.Error()) - continue - } - - if !contains(config.Ts, tsid) { - continue - } - - data, err := readCSVfile[TextObs](file.Name()) - if err != nil { - slog.Error(err.Error()) - continue - } - - // TODO: I probably need the interface don't I? - ts := &TextTimeseries{tsid, data} - count, err := lard.InsertNonscalarData(ts, pool, "") - if err != nil { - slog.Error("Failed bulk insertion: " + err.Error()) - continue - } - - totalRowsInserted += count - } - - return nil -} - -func readDataFiles() []TSLabel { - // TODO: - return nil -} diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go new file mode 100644 index 00000000..7add78c1 --- /dev/null +++ b/migrations/kvalobs/import/data.go @@ -0,0 +1,8 @@ +package port + +import "migrate/lard" + +func readDataFiles() []lard.Label { + // TODO: + return nil +} diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go new file mode 100644 index 00000000..ea3e3935 --- /dev/null +++ b/migrations/kvalobs/import/import.go @@ -0,0 +1,49 @@ +package port + +import ( + "context" + + "fmt" + "log/slog" + "os" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/lard" + "migrate/utils" +) + +type Config struct { + BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` + FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` + ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` + Ts []int32 `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` + Stations []int32 `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` + TypeIds []int32 `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` + ParamIds []int32 `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` + Sensors []int32 `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` + Levels []int32 `long:"level" description:"Optional comma separated list of levels. 
By default all available levels are processed"` +} + +func (config *Config) ShouldImport(ts *lard.Label) bool { + // TODO: there's no need to get the tsid if the other parameters don't match + // So extract the first condition + // return contains(config.Ts, tsid) || + return utils.Contains(config.Stations, ts.StationID) || + utils.Contains(config.TypeIds, ts.TypeID) || + utils.Contains(config.ParamIds, ts.ParamID) || + // TODO: these two should never be null anyway + utils.NullableContains(config.Sensors, ts.Sensor) || + utils.NullableContains(config.Levels, ts.Level) +} + +func (config *Config) Execute(_ []string) error { + pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) + if err != nil { + slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) + } + defer pool.Close() + + return nil +} diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go new file mode 100644 index 00000000..4645236a --- /dev/null +++ b/migrations/kvalobs/import/text.go @@ -0,0 +1,58 @@ +package port + +import ( + "log/slog" + "migrate/kvalobs/db" + "migrate/lard" + "migrate/utils" + "os" + + "github.com/jackc/pgx/v5/pgxpool" +) + +func (config *Config) ImportText(pool *pgxpool.Pool, path string) error { + dir, err := os.ReadDir(path) + if err != nil { + slog.Error(err.Error()) + return err + } + + var totalRowsInserted int64 + for _, file := range dir { + label, err := db.LabelFromFilename(file.Name()) + if err != nil { + slog.Error(err.Error()) + continue + } + + if !config.ShouldImport(label) { + continue + } + + tsid, err := lard.GetTimeseriesID(label, *config.FromTime, pool) + if err != nil { + slog.Error(err.Error()) + continue + } + + if !utils.Contains(config.Ts, tsid) { + continue + } + + data, err := db.ReadTextCSV(tsid, file.Name()) + if err != nil { + slog.Error(err.Error()) + continue + } + + count, err := lard.InsertTextData(data, pool, "") + if err != nil { + slog.Error("Failed bulk insertion: " + err.Error()) + continue + } + + totalRowsInserted += count + } + + return nil +} diff --git a/migrations/kvalobs/kvalobs_test.go b/migrations/kvalobs/kvalobs_test.go new file mode 100644 index 00000000..20fa279e --- /dev/null +++ b/migrations/kvalobs/kvalobs_test.go @@ -0,0 +1 @@ +package kvalobs diff --git a/migrations/kvalobs/main.go b/migrations/kvalobs/main.go index 0caae1e8..ce469bf0 100644 --- a/migrations/kvalobs/main.go +++ b/migrations/kvalobs/main.go @@ -1,7 +1,8 @@ package kvalobs import ( - "strings" + "migrate/kvalobs/dump" + port "migrate/kvalobs/import" "time" ) @@ -88,85 +89,35 @@ import ( var NULL_VALUES []float64 = []float64{-34767, -34766} -type timespan struct { - fromtime time.Time - totime time.Time -} - -type Kvalobs struct { - Hosts []string - Ports []string - DBs []string - Usernames []string - Passwords []string -} +// type timespan struct { +// fromtime time.Time +// totime time.Time +// } +// +// type Kvalobs struct { +// Hosts []string +// Ports []string +// DBs []string +// Usernames []string +// Passwords []string +// } // TODO: should we use this one as default or process all times var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) -type BaseConfig struct { - BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` - FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` - ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` - TsCmd string `long:"ts" 
description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` - StationsCmd string `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` - TypeIdsCmd string `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` - ParamIdsCmd string `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` - SensorsCmd string `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` - LevelsCmd string `long:"level" description:"Optional comma separated list of levels. By default all available levels are processed"` - - Ts []int32 // Why did I set this one as int64? - Stations []int32 - TypeIds []int32 - ParamIds []int32 - Sensors []int32 - Levels []int32 -} - -func (config *BaseConfig) setup() { - if config.TsCmd != "" { - config.Ts = Map(strings.Split(config.TsCmd, ","), toInt32) - } - if config.StationsCmd != "" { - config.Stations = Map(strings.Split(config.StationsCmd, ","), toInt32) - } - if config.TypeIdsCmd != "" { - config.TypeIds = Map(strings.Split(config.TypeIdsCmd, ","), toInt32) - } - if config.ParamIdsCmd != "" { - config.ParamIds = Map(strings.Split(config.ParamIdsCmd, ","), toInt32) - } - if config.SensorsCmd != "" { - config.Sensors = Map(strings.Split(config.SensorsCmd, ","), toInt32) - } - if config.LevelsCmd != "" { - config.Levels = Map(strings.Split(config.LevelsCmd, ","), toInt32) - } -} - -func (ts *TSLabel) ShouldBeDumped(config *DumpConfig) bool { - // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || - return contains(config.Stations, ts.StationID) || - contains(config.TypeIds, ts.TypeID) || - contains(config.ParamIds, ts.ParamID) || - // TODO: these two should never be null anyway - nullableContains(config.Sensors, ts.Sensor) || - nullableContains(config.Levels, ts.Level) -} - -func (ts *TSLabel) ShouldBeImported(config *ImportConfig) bool { - // TODO: there's no need to get the tsid if the other parameters don't match - // So extract the first condition - // return contains(config.Ts, tsid) || - return contains(config.Stations, ts.StationID) || - contains(config.TypeIds, ts.TypeID) || - contains(config.ParamIds, ts.ParamID) || - // TODO: these two should never be null anyway - nullableContains(config.Sensors, ts.Sensor) || - nullableContains(config.Levels, ts.Level) -} +// type BaseConfig struct { +// BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` +// FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` +// ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` +// Ts []int32 `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` +// Stations []int32 `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` +// TypeIds []int32 `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` +// ParamIds []int32 `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` +// Sensors []int32 `long:"sensor" description:"Optional comma separated list of sensors. 
By default all available sensors are processed"` +// Levels []int32 `long:"level" description:"Optional comma separated list of levels. By default all available levels are processed"` +// } type Cmd struct { - Dump DumpConfig `command:"dump" description:"Dump tables from Kvalobs to CSV"` - Import ImportConfig `command:"import" description:"Import CSV file dumped from Kvalobs"` + Dump dump.Config `command:"dump" description:"Dump tables from Kvalobs to CSV"` + Import port.Config `command:"import" description:"Import CSV file dumped from Kvalobs"` } diff --git a/migrations/kvalobs/utils.go b/migrations/kvalobs/utils.go deleted file mode 100644 index 1a233360..00000000 --- a/migrations/kvalobs/utils.go +++ /dev/null @@ -1,77 +0,0 @@ -package kvalobs - -import ( - "log/slog" - "os" - "slices" - "strconv" - - "github.com/gocarina/gocsv" -) - -// Loads a CSV file where records (lines) are described by type T -func readCSVfile[T any](filename string) ([]T, error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - defer file.Close() - - // TODO: maybe I should preallocate slice size if I can? - // Does UnmarshalFile allocate? - // labels := make([]T, 0, size) - var labels []T - err = gocsv.UnmarshalFile(file, &labels) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return labels, nil -} - -func toInt32(s string) int32 { - res, err := strconv.ParseInt(s, 10, 32) - if err != nil { - // Panic is fine here, because we use this function only at startup - panic("Could not parse to int") - } - return int32(res) -} - -func Map[T, V any](ts []T, fn func(T) V) []V { - result := make([]V, len(ts)) - for i, t := range ts { - result[i] = fn(t) - } - return result -} - -// Similar to Map, but bails immediately if an error occurs -func TryMap[T, V any](ts []T, fn func(T) (V, error)) ([]V, error) { - result := make([]V, len(ts)) - for i, t := range ts { - temp, err := fn(t) - if err != nil { - return nil, err - } - result[i] = temp - } - return result, nil -} - -func contains[T comparable](s []T, v T) bool { - if s == nil { - return true - } - return slices.Contains(s, v) -} - -// Returns true if the slice is empty or the value is null -func nullableContains[T comparable](s []T, v *T) bool { - if s == nil || v == nil { - return true - } - return slices.Contains(s, *v) -} diff --git a/migrations/utils/utils.go b/migrations/utils/utils.go index 31974362..b94eb52c 100644 --- a/migrations/utils/utils.go +++ b/migrations/utils/utils.go @@ -6,6 +6,7 @@ import ( "log/slog" "os" "slices" + "strconv" "strings" "github.com/schollz/progressbar/v3" @@ -72,3 +73,48 @@ func SetLogFile(table, procedure string) { } log.SetOutput(fh) } + +func ToInt32(s string) int32 { + res, err := strconv.ParseInt(s, 10, 32) + if err != nil { + // Panic is fine here, because we use this function only at startup + panic("Could not parse to int") + } + return int32(res) +} + +func Map[T, V any](ts []T, fn func(T) V) []V { + result := make([]V, len(ts)) + for i, t := range ts { + result[i] = fn(t) + } + return result +} + +// Similar to Map, but bails immediately if an error occurs +func TryMap[T, V any](ts []T, fn func(T) (V, error)) ([]V, error) { + result := make([]V, len(ts)) + for i, t := range ts { + temp, err := fn(t) + if err != nil { + return nil, err + } + result[i] = temp + } + return result, nil +} + +func Contains[T comparable](s []T, v T) bool { + if s == nil { + return true + } + return slices.Contains(s, v) +} + +// Returns true if the slice is empty or the 
value is null +func NullableContains[T comparable](s []T, v *T) bool { + if s == nil || v == nil { + return true + } + return slices.Contains(s, *v) +} From dab35ba4aaee4919744e3673f5f7fb13c0133055 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 25 Nov 2024 16:11:36 +0100 Subject: [PATCH 03/67] Use pointer in GetTimeseriesID --- migrations/kdvh/import/cache/main.go | 2 +- migrations/lard/timeseries.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index 243c6f6a..c021ec2a 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -98,7 +98,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo Level: param.Hlevel, } - tsid, err := lard.GetTimeseriesID(label, param.Fromtime, pool) + tsid, err := lard.GetTimeseriesID(&label, param.Fromtime, pool) if err != nil { slog.Error(logstr + "could not obtain timeseries - " + err.Error()) return nil, err diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index 5629b3c4..73f24b2a 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -16,7 +16,7 @@ type Label struct { Level *int32 } -func GetTimeseriesID(label Label, fromtime time.Time, pool *pgxpool.Pool) (tsid int32, err error) { +func GetTimeseriesID(label *Label, fromtime time.Time, pool *pgxpool.Pool) (tsid int32, err error) { // Query LARD labels table err = pool.QueryRow( context.TODO(), From dea347aa71214392bff922d1561043dcb4fec070 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 26 Nov 2024 13:00:21 +0100 Subject: [PATCH 04/67] Use go-arg instead of go-flags --- migrations/go.mod | 5 ++--- migrations/go.sum | 11 +++++------ migrations/kdvh/dump/dump.go | 6 +++--- migrations/kdvh/dump/main.go | 22 ++++++++++------------ migrations/kdvh/import/main.go | 26 +++++++++++++------------- migrations/kdvh/list/main.go | 4 +--- migrations/kdvh/main.go | 25 ++++++++++++++++++++++--- migrations/kvalobs/dump/data.go | 2 +- migrations/kvalobs/dump/main.go | 23 ++++++++++------------- migrations/kvalobs/dump/text.go | 2 +- migrations/kvalobs/import/import.go | 20 ++++++++++---------- migrations/kvalobs/main.go | 23 ++++++++++++++++++++--- migrations/main.go | 28 ++++++++++++++++------------ migrations/utils/timestamp.go | 20 ++++++++++++++++++++ 14 files changed, 134 insertions(+), 83 deletions(-) create mode 100644 migrations/utils/timestamp.go diff --git a/migrations/go.mod b/migrations/go.mod index 4153ee93..c53e083b 100644 --- a/migrations/go.mod +++ b/migrations/go.mod @@ -3,15 +3,16 @@ module migrate go 1.22.3 require ( + github.com/alexflint/go-arg v1.5.1 github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1 github.com/jackc/pgx/v5 v5.6.0 - github.com/jessevdk/go-flags v1.6.1 github.com/joho/godotenv v1.5.1 github.com/rickb777/period v1.0.5 github.com/schollz/progressbar/v3 v3.16.1 ) require ( + github.com/alexflint/go-scalar v1.2.0 // indirect github.com/govalues/decimal v0.1.29 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect @@ -25,5 +26,3 @@ require ( golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.16.0 // indirect ) - -replace github.com/jessevdk/go-flags => github.com/Lun4m/go-flags v0.0.0-20241118100134-6375192b7985 diff --git a/migrations/go.sum b/migrations/go.sum index 54140c04..72aadfc2 100644 --- a/migrations/go.sum +++ b/migrations/go.sum @@ -1,7 +1,7 @@ 
-github.com/Lun4m/go-flags v0.0.0-20241113125827-68757125e949 h1:7xyEGIr1X5alOjBjlNTDF+aRBcRIo60YX5sdlziLE5w= -github.com/Lun4m/go-flags v0.0.0-20241113125827-68757125e949/go.mod h1:42/L0FDbP0qe91I+81tBqjU3uoz1tn1GDMZAhcCE2PE= -github.com/Lun4m/go-flags v0.0.0-20241118100134-6375192b7985 h1:eUA/sFZ1CtY9+9y/fPpUivYW8fJBlXqB4/8CjC+yXqk= -github.com/Lun4m/go-flags v0.0.0-20241118100134-6375192b7985/go.mod h1:42/L0FDbP0qe91I+81tBqjU3uoz1tn1GDMZAhcCE2PE= +github.com/alexflint/go-arg v1.5.1 h1:nBuWUCpuRy0snAG+uIJ6N0UvYxpxA0/ghA/AaHxlT8Y= +github.com/alexflint/go-arg v1.5.1/go.mod h1:A7vTJzvjoaSTypg4biM5uYNTkJ27SkNTArtYXnlqVO8= +github.com/alexflint/go-scalar v1.2.0 h1:WR7JPKkeNpnYIOfHRa7ivM21aWAdHD0gEWHCx+WQBRw= +github.com/alexflint/go-scalar v1.2.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o= github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -21,8 +21,6 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= -github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= @@ -42,6 +40,7 @@ github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/schollz/progressbar/v3 v3.16.1 h1:RnF1neWZFzLCoGx8yp1yF7SDl4AzNDI5y4I0aUJRrZQ= github.com/schollz/progressbar/v3 v3.16.1/go.mod h1:I2ILR76gz5VXqYMIY/LdLecvMHDPVcQm3W/MSKi1TME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index 23898e61..6c62f775 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -19,7 +19,7 @@ import ( // List of columns that we do not need to select when extracting the element codes from a KDVH table var INVALID_COLUMNS = []string{"dato", "stnr", "typeid", "season", "xxx"} -func DumpTable(table *db.Table, pool *pgxpool.Pool, config *DumpConfig) { +func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { if err := os.MkdirAll(filepath.Join(config.BaseDir, table.Path), os.ModePerm); err != nil { slog.Error(err.Error()) return @@ -83,7 +83,7 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *DumpConfig) { } // Fetches elements and filters them based on user input -func getElements(table *db.Table, pool *pgxpool.Pool, config *DumpConfig) ([]string, error) { +func getElements(table *db.Table, pool *pgxpool.Pool, config 
*Config) ([]string, error) { elements, err := fetchElements(table, pool) if err != nil { return nil, err @@ -138,7 +138,7 @@ func fetchElements(table *db.Table, pool *pgxpool.Pool) (elements []string, err } // Fetches station numbers and filters them based on user input -func getStations(table *db.Table, pool *pgxpool.Pool, config *DumpConfig) ([]string, error) { +func getStations(table *db.Table, pool *pgxpool.Pool, config *Config) ([]string, error) { stations, err := fetchStnrFromElemTable(table, pool) if err != nil { return nil, err diff --git a/migrations/kdvh/dump/main.go b/migrations/kdvh/dump/main.go index 6227b989..273de50d 100644 --- a/migrations/kdvh/dump/main.go +++ b/migrations/kdvh/dump/main.go @@ -12,21 +12,21 @@ import ( "migrate/utils" ) -type DumpConfig struct { - BaseDir string `short:"p" long:"path" default:"./dumps/kdvh" description:"Location the dumped data will be stored in"` - Tables []string `short:"t" delimiter:"," long:"table" default:"" description:"Optional comma separated list of table names. By default all available tables are processed"` - Stations []string `short:"s" delimiter:"," long:"stnr" default:"" description:"Optional comma separated list of stations IDs. By default all station IDs are processed"` - Elements []string `short:"e" delimiter:"," long:"elem" default:"" description:"Optional comma separated list of element codes. By default all element codes are processed"` - Overwrite bool `long:"overwrite" description:"Overwrite any existing dumped files"` - Email []string `long:"email" delimiter:"," description:"Optional comma separated list of email addresses used to notify if the program crashed"` - MaxConn int `short:"n" long:"conn" default:"4" description:"Max number of concurrent connections allowed to KDVH"` +type Config struct { + BaseDir string `arg:"-p,--path" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` + Tables []string `arg:"-t" help:"Optional comma separated list of table names. By default all available tables are processed"` + Stations []string `arg:"-s" help:"Optional comma separated list of stations IDs. By default all station IDs are processed"` + Elements []string `arg:"-e" help:"Optional comma separated list of element codes. By default all element codes are processed"` + Overwrite bool `help:"Overwrite any existing dumped files"` + Email []string `help:"Optional comma separated list of email addresses used to notify if the program crashed"` + MaxConn int `arg:"-n,--conn" default:"4" help:"Max number of concurrent connections allowed to KDVH"` } -func (config *DumpConfig) Execute([]string) error { +func (config *Config) Execute() { pool, err := pgxpool.New(context.Background(), os.Getenv("KDVH_PROXY_CONN")) if err != nil { slog.Error(err.Error()) - return nil + return } kdvh := db.Init() @@ -38,6 +38,4 @@ func (config *DumpConfig) Execute([]string) error { utils.SetLogFile(table.TableName, "dump") DumpTable(table, pool, config) } - - return nil } diff --git a/migrations/kdvh/import/main.go b/migrations/kdvh/import/main.go index e45f9dd9..d41ded51 100644 --- a/migrations/kdvh/import/main.go +++ b/migrations/kdvh/import/main.go @@ -16,19 +16,20 @@ import ( ) type Config struct { - Verbose bool `short:"v" description:"Increase verbosity level"` - BaseDir string `short:"p" long:"path" default:"./dumps/kdvh" description:"Location the dumped data will be stored in"` - Tables []string `short:"t" long:"table" delimiter:"," default:"" description:"Optional comma separated list of table names. 
By default all available tables are processed"` - Stations []string `short:"s" long:"station" delimiter:"," default:"" description:"Optional comma separated list of stations IDs. By default all station IDs are processed"` - Elements []string `short:"e" long:"elemcode" delimiter:"," default:"" description:"Optional comma separated list of element codes. By default all element codes are processed"` - Sep string `long:"sep" default:"," description:"Separator character in the dumped files. Needs to be quoted"` - HasHeader bool `long:"header" description:"Add this flag if the dumped files have a header row"` - Skip string `long:"skip" choice:"data" choice:"flags" description:"Skip import of data or flags"` - Email []string `long:"email" delimiter:"," description:"Optional comma separated list of email addresses used to notify if the program crashed"` - Reindex bool `long:"reindex" description:"Drops PG indices before insertion. Might improve performance"` + Verbose bool `arg:"-v" help:"Increase verbosity level"` + BaseDir string `arg:"-p,--path" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` + Tables []string `arg:"-t" help:"Optional comma separated list of table names. By default all available tables are processed"` + Stations []string `arg:"-s" help:"Optional comma separated list of stations IDs. By default all station IDs are processed"` + Elements []string `arg:"-e" help:"Optional comma separated list of element codes. By default all element codes are processed"` + Sep string `default:"," help:"Separator character in the dumped files. Needs to be quoted"` + HasHeader bool `help:"Add this flag if the dumped files have a header row"` + // TODO: this isn't implemented in go-arg + // Skip string `choice:"data" choice:"flags" help:"Skip import of data or flags"` + Email []string `help:"Optional comma separated list of email addresses used to notify if the program crashed"` + Reindex bool `help:"Drops PG indices before insertion. Might improve performance"` } -func (config *Config) Execute([]string) error { +func (config *Config) Execute() { if len(config.Sep) > 1 { fmt.Printf("Error: '--sep' only accepts single-byte characters. 
Got %s", config.Sep) os.Exit(1) @@ -44,7 +45,7 @@ func (config *Config) Execute([]string) error { pool, err := pgxpool.New(context.TODO(), os.Getenv("LARD_STRING")) if err != nil { slog.Error(fmt.Sprint("Could not connect to Lard:", err)) - return err + return } defer pool.Close() @@ -82,7 +83,6 @@ func (config *Config) Execute([]string) error { log.SetOutput(os.Stdout) slog.Info("Import complete!") - return nil } func dropIndices(pool *pgxpool.Pool) { diff --git a/migrations/kdvh/list/main.go b/migrations/kdvh/list/main.go index 579d620f..4774f55c 100644 --- a/migrations/kdvh/list/main.go +++ b/migrations/kdvh/list/main.go @@ -9,7 +9,7 @@ import ( type Config struct{} -func (config *Config) Execute(_ []string) error { +func (config *Config) Execute() { fmt.Println("Available tables in KDVH:") kdvh := db.Init() @@ -23,6 +23,4 @@ func (config *Config) Execute(_ []string) error { for _, table := range tables { fmt.Println(" -", table) } - - return nil } diff --git a/migrations/kdvh/main.go b/migrations/kdvh/main.go index 2ad6c06f..bf146326 100644 --- a/migrations/kdvh/main.go +++ b/migrations/kdvh/main.go @@ -1,6 +1,11 @@ package kdvh import ( + "fmt" + "os" + + "github.com/alexflint/go-arg" + "migrate/kdvh/dump" port "migrate/kdvh/import" "migrate/kdvh/list" @@ -8,7 +13,21 @@ import ( // Command line arguments for KDVH migrations type Cmd struct { - Dump dump.DumpConfig `command:"dump" description:"Dump tables from KDVH to CSV"` - Import port.Config `command:"import" description:"Import CSV file dumped from KDVH"` - List list.Config `command:"list" description:"List available KDVH tables"` + Dump *dump.Config `arg:"subcommand" help:"Dump tables from KDVH to CSV"` + Import *port.Config `arg:"subcommand" help:"Import CSV file dumped from KDVH"` + List *list.Config `arg:"subcommand" help:"List available KDVH tables"` +} + +func (c *Cmd) Execute(parser *arg.Parser) { + switch { + case c.Dump != nil: + c.Dump.Execute() + case c.Import != nil: + c.Import.Execute() + case c.List != nil: + c.List.Execute() + default: + fmt.Println("Error: passing a subcommand is required.\n") + parser.WriteHelpForSubcommand(os.Stdout, "kdvh") + } } diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index eb735051..6b31be12 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -15,7 +15,7 @@ import ( const DATA_LABEL_CSV string = "data_labels.csv" -func (config *Config) DumpData(outpath string, pool *pgxpool.Pool) { +func (config *Config) dumpData(outpath string, pool *pgxpool.Pool) { var labels []*lard.Label dataPath := filepath.Join(outpath, "data") diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 6eda54c1..59d850a1 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -6,7 +6,6 @@ import ( "log/slog" "os" "path/filepath" - "time" "github.com/jackc/pgx/v5/pgxpool" @@ -25,15 +24,15 @@ import ( // func joinTS(first, second []lard.Label) type Config struct { - BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` - FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` - ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` - // Ts []int32 `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` - Stations []int32 `long:"station" description:"Optional comma separated list of station numbers. 
By default all available station numbers are processed"` - TypeIds []int32 `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` - ParamIds []int32 `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` - Sensors []int32 `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` - Levels []int32 `long:"level" description:"Optional comma separated list of levels. By default all available levels are processed"` + BaseDir string `arg:"-p,--path" default:"./dumps" help:"Location the dumped data will be stored in."` + FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this (date-only) timestamp. For example, '2006-01-01'"` + ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this (date-only) timestamp. For example, '2006-01-01'"` + // Ts []int32 `long:"ts" help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` + Stations []int32 `help:"Optional space separated list of station numbers."` + TypeIds []int32 `help:"Optional space separated list of type IDs."` + ParamIds []int32 `help:"Optional space separated list of param IDs."` + Sensors []int32 `help:"Optional space separated list of sensors."` + Levels []int32 `help:"Optional space separated list of levels."` } func (config *Config) ShouldDumpLabel(label *lard.Label) bool { @@ -46,15 +45,13 @@ func (config *Config) ShouldDumpLabel(label *lard.Label) bool { utils.NullableContains(config.Levels, label.Level) } -func (config *Config) Execute(_ []string) error { +func (config *Config) Execute() { // dump kvalobs config.dump("KVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "kvalobs")) // dump histkvalobs // TODO: maybe it's worth adding a separate flag? config.dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "histkvalobs")) - - return nil } func (config *Config) dump(envvar, path string) { diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 8137da3c..b4a27e4d 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -13,7 +13,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" ) -func DumpText(path string, pool *pgxpool.Pool, config *Config) { +func dumpText(path string, pool *pgxpool.Pool, config *Config) { var labels []*lard.Label textPath := filepath.Join(path, "text") diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index ea3e3935..f1529f12 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -15,15 +15,15 @@ import ( ) type Config struct { - BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` - FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` - ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` - Ts []int32 `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` - Stations []int32 `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` - TypeIds []int32 `long:"typeid" description:"Optional comma separated list of type IDs. 
By default all available type IDs are processed"` - ParamIds []int32 `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` - Sensors []int32 `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` - Levels []int32 `long:"level" description:"Optional comma separated list of levels. By default all available levels are processed"` + BaseDir string `arg:"-p,--path" default:"./dumps" help:"Location the dumped data will be stored in"` + FromTime *time.Time `arg:"--from" help:"Fetch data only starting from this timestamp"` + ToTime *time.Time `arg:"--to" help:"Fetch data only until this timestamp"` + Ts []int32 `help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` + Stations []int32 `help:"Optional comma separated list of station numbers. By default all available station numbers are processed"` + TypeIds []int32 `help:"Optional comma separated list of type IDs. By default all available type IDs are processed"` + ParamIds []int32 `help:"Optional comma separated list of param IDs. By default all available param IDs are processed"` + Sensors []int32 `help:"Optional comma separated list of sensors. By default all available sensors are processed"` + Levels []int32 `help:"Optional comma separated list of levels. By default all available levels are processed"` } func (config *Config) ShouldImport(ts *lard.Label) bool { @@ -38,7 +38,7 @@ func (config *Config) ShouldImport(ts *lard.Label) bool { utils.NullableContains(config.Levels, ts.Level) } -func (config *Config) Execute(_ []string) error { +func (config *Config) Execute() error { pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) if err != nil { slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) diff --git a/migrations/kvalobs/main.go b/migrations/kvalobs/main.go index ce469bf0..fee334b1 100644 --- a/migrations/kvalobs/main.go +++ b/migrations/kvalobs/main.go @@ -1,9 +1,14 @@ package kvalobs import ( + "fmt" + "os" + "time" + + "github.com/alexflint/go-arg" + "migrate/kvalobs/dump" port "migrate/kvalobs/import" - "time" ) // Kvalobs is composed of two databases @@ -118,6 +123,18 @@ var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) // } type Cmd struct { - Dump dump.Config `command:"dump" description:"Dump tables from Kvalobs to CSV"` - Import port.Config `command:"import" description:"Import CSV file dumped from Kvalobs"` + Dump *dump.Config `arg:"subcommand" help:"Dump tables from Kvalobs to CSV"` + Import *port.Config `arg:"subcommand" help:"Import CSV file dumped from Kvalobs"` +} + +func (c *Cmd) Execute(parser *arg.Parser) { + switch { + case c.Dump != nil: + c.Dump.Execute() + case c.Import != nil: + c.Import.Execute() + default: + fmt.Println("Error: passing a subcommand is required.\n") + parser.WriteHelpForSubcommand(os.Stdout, "kvalobs") + } } diff --git a/migrations/main.go b/migrations/main.go index 78ae62c8..4b285884 100644 --- a/migrations/main.go +++ b/migrations/main.go @@ -3,15 +3,18 @@ package main import ( "fmt" "log" + "os" - "github.com/jessevdk/go-flags" + "github.com/alexflint/go-arg" "github.com/joho/godotenv" "migrate/kdvh" + "migrate/kvalobs" ) type CmdArgs struct { - KDVH kdvh.Cmd `command:"kdvh" description:"Perform KDVH migrations"` + KDVH *kdvh.Cmd `arg:"subcommand" help:"Perform KDVH migrations"` + Kvalobs *kvalobs.Cmd `arg:"subcommand" help:"Perform Kvalobs migrations"` } 
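// Editor's sketch, not part of the patch series: a minimal, self-contained example of
// the go-arg conventions relied on above and in the following commits, assuming the
// documented behaviour of github.com/alexflint/go-arg — subcommands are pointer fields
// that stay nil unless selected, slice flags take space separated values, and any type
// implementing encoding.TextUnmarshaler (like the utils.Timestamp added below) is parsed
// via UnmarshalText. The names `Date`, `DumpArgs` and `Args` here are hypothetical.
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//
//		"github.com/alexflint/go-arg"
//	)
//
//	// Date is filled by go-arg through UnmarshalText, e.g. `--from 2006-01-01`.
//	type Date struct{ t time.Time }
//
//	func (d *Date) UnmarshalText(b []byte) error {
//		t, err := time.Parse(time.DateOnly, string(b))
//		if err != nil {
//			return err
//		}
//		d.t = t
//		return nil
//	}
//
//	type DumpArgs struct {
//		// Space separated on the command line: `dump --stations 18700 18710`
//		Stations []int32 `help:"Optional space separated list of station numbers"`
//		From     *Date   `arg:"--from" help:"Dump data starting from this date"`
//	}
//
//	type Args struct {
//		// Non-nil only when the `dump` subcommand is passed
//		Dump *DumpArgs `arg:"subcommand:dump" help:"Dump tables to CSV"`
//	}
//
//	func main() {
//		var args Args
//		arg.MustParse(&args)
//		if args.Dump != nil {
//			fmt.Println("stations:", args.Dump.Stations)
//		}
//	}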
func main() { @@ -29,15 +32,16 @@ func main() { return } - // NOTE: go-flags calls the Execute method on the parsed subcommand - _, err = flags.Parse(&CmdArgs{}) - if err != nil { - if flagsErr, ok := err.(*flags.Error); ok { - if flagsErr.Type == flags.ErrHelp { - return - } - } - fmt.Println("Type './migrate -h' for help") - return + args := CmdArgs{} + parser := arg.MustParse(&args) + + switch { + case args.KDVH != nil: + args.KDVH.Execute(parser) + case args.Kvalobs != nil: + args.Kvalobs.Execute(parser) + default: + fmt.Println("Error: passing a subcommand is required.\n") + parser.WriteHelp(os.Stdout) } } diff --git a/migrations/utils/timestamp.go b/migrations/utils/timestamp.go new file mode 100644 index 00000000..137e6410 --- /dev/null +++ b/migrations/utils/timestamp.go @@ -0,0 +1,20 @@ +package utils + +import "time" + +type Timestamp struct { + t time.Time +} + +func (ts *Timestamp) UnmarshalText(b []byte) error { + t, err := time.Parse(time.DateOnly, string(b)) + if err != nil { + return err + } + ts.t = t + return nil +} + +func (ts *Timestamp) Format(layout string) string { + return ts.t.Format(layout) +} From e64d1dd0301e6cc72ad5bdedcc65c950bd8e76db Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 26 Nov 2024 13:01:24 +0100 Subject: [PATCH 05/67] Remove Skip argument and don't insert flags for text data --- migrations/kdvh/import/import.go | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index dd48fbf0..7561d6d7 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -77,28 +77,21 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config } var count int64 - if !(config.Skip == "data") { - if tsInfo.Param.IsScalar { - count, err = lard.InsertData(data, pool, tsInfo.Logstr) - if err != nil { - slog.Error(tsInfo.Logstr + "failed data bulk insertion - " + err.Error()) - return - } - } else { - count, err = lard.InsertTextData(text, pool, tsInfo.Logstr) - if err != nil { - slog.Error(tsInfo.Logstr + "failed non-scalar data bulk insertion - " + err.Error()) - return - } - // TODO: should we skip inserting flags here? 
In kvalobs there are no flags for text data - // return count, nil + if tsInfo.Param.IsScalar { + count, err = lard.InsertData(data, pool, tsInfo.Logstr) + if err != nil { + slog.Error(tsInfo.Logstr + "failed data bulk insertion - " + err.Error()) + return } - } - - if !(config.Skip == "flags") { if err := lard.InsertFlags(flag, pool, tsInfo.Logstr); err != nil { slog.Error(tsInfo.Logstr + "failed flag bulk insertion - " + err.Error()) } + } else { + count, err = lard.InsertTextData(text, pool, tsInfo.Logstr) + if err != nil { + slog.Error(tsInfo.Logstr + "failed non-scalar data bulk insertion - " + err.Error()) + return + } } rowsInserted += count From 308140d872d4bfe95a1b33b899570e2e10596e14 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 26 Nov 2024 13:12:16 +0100 Subject: [PATCH 06/67] Add helpful error for timestamp parsing --- migrations/utils/timestamp.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/migrations/utils/timestamp.go b/migrations/utils/timestamp.go index 137e6410..cf1e1498 100644 --- a/migrations/utils/timestamp.go +++ b/migrations/utils/timestamp.go @@ -1,6 +1,9 @@ package utils -import "time" +import ( + "fmt" + "time" +) type Timestamp struct { t time.Time @@ -9,7 +12,7 @@ type Timestamp struct { func (ts *Timestamp) UnmarshalText(b []byte) error { t, err := time.Parse(time.DateOnly, string(b)) if err != nil { - return err + return fmt.Errorf("Only the date-only format (\"YYYY-MM-DD\") is allowed. Got \"%s\"", b) } ts.t = t return nil From 8079a6185315e9bcc3fe4687951f4d2a4098e7f4 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 26 Nov 2024 13:14:59 +0100 Subject: [PATCH 07/67] Change BaseDir to Path --- migrations/kdvh/dump/dump.go | 8 ++++---- migrations/kdvh/dump/main.go | 11 +++++------ migrations/kdvh/import/import.go | 4 ++-- migrations/kdvh/import/main.go | 4 ++-- migrations/kdvh/kdvh_test.go | 2 +- migrations/kvalobs/dump/main.go | 10 +++++----- migrations/kvalobs/dump/text.go | 4 ++-- migrations/kvalobs/import/import.go | 2 +- 8 files changed, 22 insertions(+), 23 deletions(-) diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index 6c62f775..59619be4 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -20,7 +20,7 @@ import ( var INVALID_COLUMNS = []string{"dato", "stnr", "typeid", "season", "xxx"} func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { - if err := os.MkdirAll(filepath.Join(config.BaseDir, table.Path), os.ModePerm); err != nil { + if err := os.MkdirAll(filepath.Join(config.Path, table.Path), os.ModePerm); err != nil { slog.Error(err.Error()) return } @@ -43,7 +43,7 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { bar := utils.NewBar(len(stations), table.TableName) bar.RenderBlank() for _, station := range stations { - path := filepath.Join(config.BaseDir, table.Path, string(station)) + path := filepath.Join(config.Path, table.Path, string(station)) if err := os.MkdirAll(path, os.ModePerm); err != nil { slog.Error(err.Error()) return @@ -89,7 +89,7 @@ func getElements(table *db.Table, pool *pgxpool.Pool, config *Config) ([]string, return nil, err } - filename := filepath.Join(config.BaseDir, table.Path, "elements.txt") + filename := filepath.Join(config.Path, table.Path, "elements.txt") if err := utils.SaveToFile(elements, filename); err != nil { slog.Warn(err.Error()) } @@ -144,7 +144,7 @@ func getStations(table *db.Table, pool *pgxpool.Pool, config *Config) ([]string, return nil, err } - filename := 
filepath.Join(config.BaseDir, table.Path, "stations.txt") + filename := filepath.Join(config.Path, table.Path, "stations.txt") if err := utils.SaveToFile(stations, filename); err != nil { slog.Warn(err.Error()) } diff --git a/migrations/kdvh/dump/main.go b/migrations/kdvh/dump/main.go index 273de50d..e31c9e9d 100644 --- a/migrations/kdvh/dump/main.go +++ b/migrations/kdvh/dump/main.go @@ -13,13 +13,12 @@ import ( ) type Config struct { - BaseDir string `arg:"-p,--path" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` - Tables []string `arg:"-t" help:"Optional comma separated list of table names. By default all available tables are processed"` - Stations []string `arg:"-s" help:"Optional comma separated list of stations IDs. By default all station IDs are processed"` - Elements []string `arg:"-e" help:"Optional comma separated list of element codes. By default all element codes are processed"` + Path string `arg:"-p" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` + Tables []string `arg:"-t" help:"Optional space separated list of table names"` + Stations []string `arg:"-s" help:"Optional space separated list of stations IDs"` + Elements []string `arg:"-e" help:"Optional space separated list of element codes"` Overwrite bool `help:"Overwrite any existing dumped files"` - Email []string `help:"Optional comma separated list of email addresses used to notify if the program crashed"` - MaxConn int `arg:"-n,--conn" default:"4" help:"Max number of concurrent connections allowed to KDVH"` + MaxConn int `arg:"-n" default:"4" help:"Max number of concurrent connections allowed to KDVH"` } func (config *Config) Execute() { diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index 7561d6d7..86d6a97d 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -25,7 +25,7 @@ import ( var INVALID_ELEMENTS = []string{"TYPEID", "TAM_NORMAL_9120", "RRA_NORMAL_9120", "OT", "OTN", "OTX", "DD06", "DD12", "DD18"} func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { - stations, err := os.ReadDir(filepath.Join(config.BaseDir, table.Path)) + stations, err := os.ReadDir(filepath.Join(config.Path, table.Path)) if err != nil { slog.Warn(err.Error()) return 0 @@ -44,7 +44,7 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config continue } - dir := filepath.Join(config.BaseDir, table.Path, station.Name()) + dir := filepath.Join(config.Path, table.Path, station.Name()) elements, err := os.ReadDir(dir) if err != nil { slog.Warn(err.Error()) diff --git a/migrations/kdvh/import/main.go b/migrations/kdvh/import/main.go index d41ded51..3fddb355 100644 --- a/migrations/kdvh/import/main.go +++ b/migrations/kdvh/import/main.go @@ -17,7 +17,7 @@ import ( type Config struct { Verbose bool `arg:"-v" help:"Increase verbosity level"` - BaseDir string `arg:"-p,--path" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` + Path string `arg:"-p" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` Tables []string `arg:"-t" help:"Optional comma separated list of table names. By default all available tables are processed"` Stations []string `arg:"-s" help:"Optional comma separated list of stations IDs. By default all station IDs are processed"` Elements []string `arg:"-e" help:"Optional comma separated list of element codes. 
By default all element codes are processed"` @@ -26,7 +26,7 @@ type Config struct { // TODO: this isn't implemented in go-arg // Skip string `choice:"data" choice:"flags" help:"Skip import of data or flags"` Email []string `help:"Optional comma separated list of email addresses used to notify if the program crashed"` - Reindex bool `help:"Drops PG indices before insertion. Might improve performance"` + Reindex bool `help:"Drop PG indices before insertion. Might improve performance"` } func (config *Config) Execute() { diff --git a/migrations/kdvh/kdvh_test.go b/migrations/kdvh/kdvh_test.go index 196f12c5..fa1b9be9 100644 --- a/migrations/kdvh/kdvh_test.go +++ b/migrations/kdvh/kdvh_test.go @@ -29,7 +29,7 @@ func (t *ImportTest) mockConfig() (*port.Config, *cache.Cache) { Tables: []string{t.table}, Stations: []string{fmt.Sprint(t.station)}, Elements: []string{t.elem}, - BaseDir: "./tests", + Path: "./tests", HasHeader: true, Sep: ";", }, diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 59d850a1..ad7e74df 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -24,9 +24,9 @@ import ( // func joinTS(first, second []lard.Label) type Config struct { - BaseDir string `arg:"-p,--path" default:"./dumps" help:"Location the dumped data will be stored in."` - FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this (date-only) timestamp. For example, '2006-01-01'"` - ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this (date-only) timestamp. For example, '2006-01-01'"` + Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` + FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` + ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` // Ts []int32 `long:"ts" help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` Stations []int32 `help:"Optional space separated list of station numbers."` TypeIds []int32 `help:"Optional space separated list of type IDs."` @@ -47,11 +47,11 @@ func (config *Config) ShouldDumpLabel(label *lard.Label) bool { func (config *Config) Execute() { // dump kvalobs - config.dump("KVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "kvalobs")) + config.dump("KVALOBS_CONN_STRING", filepath.Join(config.Path, "kvalobs")) // dump histkvalobs // TODO: maybe it's worth adding a separate flag? 
- config.dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.BaseDir, "histkvalobs")) + config.dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.Path, "histkvalobs")) } func (config *Config) dump(envvar, path string) { diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index b4a27e4d..0509d6f0 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -72,12 +72,12 @@ func (config *Config) dumpTextTS(pool *pgxpool.Pool) { return } - if err := os.MkdirAll(config.BaseDir, os.ModePerm); err != nil { + if err := os.MkdirAll(config.Path, os.ModePerm); err != nil { slog.Error(err.Error()) return } - path := filepath.Join(config.BaseDir, "text_timeseries.csv") + path := filepath.Join(config.Path, "text_timeseries.csv") file, err := os.Create(path) if err != nil { slog.Error(err.Error()) diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index f1529f12..85ae9fa7 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -15,7 +15,7 @@ import ( ) type Config struct { - BaseDir string `arg:"-p,--path" default:"./dumps" help:"Location the dumped data will be stored in"` + Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` FromTime *time.Time `arg:"--from" help:"Fetch data only starting from this timestamp"` ToTime *time.Time `arg:"--to" help:"Fetch data only until this timestamp"` Ts []int32 `help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` From c47718b70c73fd722a36d97462bd3d52fca3fe78 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 26 Nov 2024 13:12:05 +0100 Subject: [PATCH 08/67] Simplify help messages --- migrations/kdvh/import/main.go | 10 +++++----- migrations/kvalobs/dump/main.go | 11 ++++++----- migrations/kvalobs/import/import.go | 12 ++++++------ 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/migrations/kdvh/import/main.go b/migrations/kdvh/import/main.go index 3fddb355..83daef8c 100644 --- a/migrations/kdvh/import/main.go +++ b/migrations/kdvh/import/main.go @@ -18,15 +18,15 @@ import ( type Config struct { Verbose bool `arg:"-v" help:"Increase verbosity level"` Path string `arg:"-p" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` - Tables []string `arg:"-t" help:"Optional comma separated list of table names. By default all available tables are processed"` - Stations []string `arg:"-s" help:"Optional comma separated list of stations IDs. By default all station IDs are processed"` - Elements []string `arg:"-e" help:"Optional comma separated list of element codes. By default all element codes are processed"` + BaseDir string `arg:"-p,--path" default:"./dumps/kdvh" help:"Location the dumped data will be stored in"` + Tables []string `arg:"-t" help:"Optional space separated list of table names"` + Stations []string `arg:"-s" help:"Optional space separated list of stations IDs"` + Elements []string `arg:"-e" help:"Optional space separated list of element codes"` Sep string `default:"," help:"Separator character in the dumped files. Needs to be quoted"` HasHeader bool `help:"Add this flag if the dumped files have a header row"` // TODO: this isn't implemented in go-arg // Skip string `choice:"data" choice:"flags" help:"Skip import of data or flags"` - Email []string `help:"Optional comma separated list of email addresses used to notify if the program crashed"` - Reindex bool `help:"Drop PG indices before insertion. 
Might improve performance"` + Reindex bool `help:"Drop PG indices before insertion. Might improve performance"` } func (config *Config) Execute() { diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index ad7e74df..d7fffd21 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -20,6 +20,7 @@ import ( // // Supposed to join text anf number data to single slice // return nil // } +// // TODO: not sure what to do with this one // func joinTS(first, second []lard.Label) @@ -28,11 +29,11 @@ type Config struct { FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` // Ts []int32 `long:"ts" help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` - Stations []int32 `help:"Optional space separated list of station numbers."` - TypeIds []int32 `help:"Optional space separated list of type IDs."` - ParamIds []int32 `help:"Optional space separated list of param IDs."` - Sensors []int32 `help:"Optional space separated list of sensors."` - Levels []int32 `help:"Optional space separated list of levels."` + Stations []int32 `help:"Optional space separated list of station numbers"` + TypeIds []int32 `help:"Optional space separated list of type IDs"` + ParamIds []int32 `help:"Optional space separated list of param IDs"` + Sensors []int32 `help:"Optional space separated list of sensors"` + Levels []int32 `help:"Optional space separated list of levels"` } func (config *Config) ShouldDumpLabel(label *lard.Label) bool { diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 85ae9fa7..de491e83 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -18,12 +18,12 @@ type Config struct { Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` FromTime *time.Time `arg:"--from" help:"Fetch data only starting from this timestamp"` ToTime *time.Time `arg:"--to" help:"Fetch data only until this timestamp"` - Ts []int32 `help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` - Stations []int32 `help:"Optional comma separated list of station numbers. By default all available station numbers are processed"` - TypeIds []int32 `help:"Optional comma separated list of type IDs. By default all available type IDs are processed"` - ParamIds []int32 `help:"Optional comma separated list of param IDs. By default all available param IDs are processed"` - Sensors []int32 `help:"Optional comma separated list of sensors. By default all available sensors are processed"` - Levels []int32 `help:"Optional comma separated list of levels. 
By default all available levels are processed"` + Ts []int32 `help:"Optional space separated list of timeseries."` + Stations []int32 `help:"Optional space separated list of station numbers."` + TypeIds []int32 `help:"Optional space separated list of type IDs."` + ParamIds []int32 `help:"Optional space separated list of param IDs."` + Sensors []int32 `help:"Optional space separated list of sensors."` + Levels []int32 `help:"Optional space separated list of levels."` } func (config *Config) ShouldImport(ts *lard.Label) bool { From d0a08d7c62eb19cdbb00ed10b809ebec4da6268e Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 27 Nov 2024 11:06:01 +0100 Subject: [PATCH 09/67] Need to add another fmt.Println() to make tests work :clown_face: --- migrations/kdvh/main.go | 3 ++- migrations/kvalobs/main.go | 3 ++- migrations/main.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/migrations/kdvh/main.go b/migrations/kdvh/main.go index bf146326..7cb41c17 100644 --- a/migrations/kdvh/main.go +++ b/migrations/kdvh/main.go @@ -27,7 +27,8 @@ func (c *Cmd) Execute(parser *arg.Parser) { case c.List != nil: c.List.Execute() default: - fmt.Println("Error: passing a subcommand is required.\n") + fmt.Println("Error: passing a subcommand is required.") + fmt.Println() parser.WriteHelpForSubcommand(os.Stdout, "kdvh") } } diff --git a/migrations/kvalobs/main.go b/migrations/kvalobs/main.go index fee334b1..88e8e9b7 100644 --- a/migrations/kvalobs/main.go +++ b/migrations/kvalobs/main.go @@ -134,7 +134,8 @@ func (c *Cmd) Execute(parser *arg.Parser) { case c.Import != nil: c.Import.Execute() default: - fmt.Println("Error: passing a subcommand is required.\n") + fmt.Println("Error: passing a subcommand is required.") + fmt.Println() parser.WriteHelpForSubcommand(os.Stdout, "kvalobs") } } diff --git a/migrations/main.go b/migrations/main.go index 4b285884..2d7f5c26 100644 --- a/migrations/main.go +++ b/migrations/main.go @@ -41,7 +41,8 @@ func main() { case args.Kvalobs != nil: args.Kvalobs.Execute(parser) default: - fmt.Println("Error: passing a subcommand is required.\n") + fmt.Println("Error: passing a subcommand is required.") + fmt.Println() parser.WriteHelp(os.Stdout) } } From ecf65d5a9823808025b9883aa5113b639a0b2065 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 27 Nov 2024 13:07:30 +0100 Subject: [PATCH 10/67] Use common Config and add test --- migrations/kvalobs/config_test.go | 60 ++++++++++++++++++++++++++++ migrations/kvalobs/db/base_config.go | 33 +++++++++++++++ migrations/kvalobs/dump/main.go | 24 ++--------- migrations/kvalobs/import/main.go | 28 +++++++++++++ migrations/utils/utils.go | 13 ++++-- 5 files changed, 134 insertions(+), 24 deletions(-) create mode 100644 migrations/kvalobs/config_test.go create mode 100644 migrations/kvalobs/db/base_config.go create mode 100644 migrations/kvalobs/import/main.go diff --git a/migrations/kvalobs/config_test.go b/migrations/kvalobs/config_test.go new file mode 100644 index 00000000..79b756ac --- /dev/null +++ b/migrations/kvalobs/config_test.go @@ -0,0 +1,60 @@ +package kvalobs + +import ( + "migrate/kvalobs/db" + "migrate/lard" + "testing" +) + +func TestShouldProcessLabel(t *testing.T) { + type TestCase struct { + tag string + label lard.Label + config db.BaseConfig + expected bool + } + + cases := []TestCase{ + { + tag: "empty config", + label: lard.Label{StationID: 18700}, + config: db.BaseConfig{}, + expected: true, + }, + { + tag: "station specified", + label: lard.Label{StationID: 18700}, + config: 
db.BaseConfig{Stations: []int32{18700}}, + expected: true, + }, + { + tag: "station not in label", + label: lard.Label{StationID: 18700}, + config: db.BaseConfig{Stations: []int32{20000}}, + expected: false, + }, + { + tag: "label without level", + label: lard.Label{}, + config: db.BaseConfig{Levels: []int32{2}}, + expected: false, + }, + { + tag: "valid level", + label: func() lard.Label { + var level int32 = 2 + return lard.Label{Level: &level} + }(), + config: db.BaseConfig{Levels: []int32{2}}, + expected: true, + }, + } + + for _, c := range cases { + res := c.config.ShouldProcessLabel(&c.label) + if res != c.expected { + t.Log(c.tag) + t.Fail() + } + } +} diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go new file mode 100644 index 00000000..5b1d0b2b --- /dev/null +++ b/migrations/kvalobs/db/base_config.go @@ -0,0 +1,33 @@ +package db + +import ( + "time" + + "migrate/lard" + "migrate/utils" +) + +// TODO: should we use this one as default or process all times +var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) + +type BaseConfig struct { + Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` + FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` + ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` + // Ts []int32 `long:"ts" help:"Optional comma separated list of timeseries. By default all available timeseries are processed"` + Stations []int32 `help:"Optional space separated list of station numbers"` + TypeIds []int32 `help:"Optional space separated list of type IDs"` + ParamIds []int32 `help:"Optional space separated list of param IDs"` + Sensors []int32 `help:"Optional space separated list of sensors"` + Levels []int32 `help:"Optional space separated list of levels"` +} + +func (config *BaseConfig) ShouldProcessLabel(label *lard.Label) bool { + // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || + return utils.Contains(config.Stations, label.StationID) && + utils.Contains(config.TypeIds, label.TypeID) && + utils.Contains(config.ParamIds, label.ParamID) && + // TODO: these two should never be null anyway + utils.NullableContains(config.Sensors, label.Sensor) && + utils.NullableContains(config.Levels, label.Level) +} diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index d7fffd21..b063d72e 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -9,8 +9,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "migrate/lard" - "migrate/utils" + "migrate/kvalobs/db" ) // Same timeseries could be in both 'data' and 'text_data' tables @@ -25,25 +24,8 @@ import ( // func joinTS(first, second []lard.Label) type Config struct { - Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` - FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` - ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` - // Ts []int32 `long:"ts" help:"Optional comma separated list of timeseries. 
By default all available timeseries are processed"` - Stations []int32 `help:"Optional space separated list of station numbers"` - TypeIds []int32 `help:"Optional space separated list of type IDs"` - ParamIds []int32 `help:"Optional space separated list of param IDs"` - Sensors []int32 `help:"Optional space separated list of sensors"` - Levels []int32 `help:"Optional space separated list of levels"` -} - -func (config *Config) ShouldDumpLabel(label *lard.Label) bool { - // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || - return utils.Contains(config.Stations, label.StationID) || - utils.Contains(config.TypeIds, label.TypeID) || - utils.Contains(config.ParamIds, label.ParamID) || - // TODO: these two should never be null anyway - utils.NullableContains(config.Sensors, label.Sensor) || - utils.NullableContains(config.Levels, label.Level) + db.BaseConfig + UpdateLabels bool `help:"Overwrites the label CSV files"` } func (config *Config) Execute() { diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go new file mode 100644 index 00000000..b372b787 --- /dev/null +++ b/migrations/kvalobs/import/main.go @@ -0,0 +1,28 @@ +package port + +import ( + "context" + + "fmt" + "log/slog" + "os" + + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/kvalobs/db" +) + +type Config struct { + db.BaseConfig + Ts []int32 `help:"Optional space separated list of timeseries."` +} + +func (config *Config) Execute() error { + pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) + if err != nil { + slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) + } + defer pool.Close() + + return nil +} diff --git a/migrations/utils/utils.go b/migrations/utils/utils.go index b94eb52c..3e155162 100644 --- a/migrations/utils/utils.go +++ b/migrations/utils/utils.go @@ -91,7 +91,7 @@ func Map[T, V any](ts []T, fn func(T) V) []V { return result } -// Similar to Map, but bails immediately if an error occurs +// Similar to Map, but bails immediately if any error occurs func TryMap[T, V any](ts []T, fn func(T) (V, error)) ([]V, error) { result := make([]V, len(ts)) for i, t := range ts { @@ -104,6 +104,8 @@ func TryMap[T, V any](ts []T, fn func(T) (V, error)) ([]V, error) { return result, nil } +// Same as slices.Contains but return `true` if the slice is nil, +// meaning that upstream the slice is optional func Contains[T comparable](s []T, v T) bool { if s == nil { return true @@ -111,10 +113,15 @@ func Contains[T comparable](s []T, v T) bool { return slices.Contains(s, v) } -// Returns true if the slice is empty or the value is null func NullableContains[T comparable](s []T, v *T) bool { - if s == nil || v == nil { + if s == nil { return true } + + if v == nil { + // Non-nil slice does not contain nil + return false + } + return slices.Contains(s, *v) } From a85171a3bfc56d24fa769f7f22e683bd3cec7bc4 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 27 Nov 2024 13:07:59 +0100 Subject: [PATCH 11/67] Move doc comment --- migrations/kvalobs/db/main.go | 101 ++++++++++++++++++++++++++---- migrations/kvalobs/main.go | 112 ---------------------------------- 2 files changed, 88 insertions(+), 125 deletions(-) diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 9268c53b..53e4997f 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -4,17 +4,92 @@ import ( "time" ) -// This is basically the same as lard.Label (except for ParamCode) -// type TSLabel struct { -// StationID int32 `db:"stationid"` -// TypeID 
int32 `db:"typeid"` -// ParamID int32 `db:"paramid"` -// Sensor *int32 `db:"sensor"` -// Level *int32 `db:"level"` -// // ParamCode string `db:"name,omitempty"` -// } +// Kvalobs is composed of two databases +// 1) `kvalobs` for fresh data +// 2) `histkvalobs` for data older than +// +// Both contain the same tables: +// - `algorithms`: empty (???) - stores procedure info for QC checks +// - `checks`: empty (???) +// - `data`: stores numerical observations, associated metadata, and QC info +// +// Column | Type | Collation | Nullable | Default +// -------------+-----------------------------+-----------+----------+---------------------------- +// stationid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// original | double precision | | not null | +// paramid | integer | | not null | +// tbtime | timestamp without time zone | | not null | +// typeid | integer | | not null | +// sensor | character(1) | | | '0'::bpchar +// level | integer | | | 0 +// corrected | double precision | | not null | +// controlinfo | character(16) | | | '0000000000000000'::bpchar +// useinfo | character(16) | | | '0000000000000000'::bpchar +// cfailed | text | | | +// +// - `default_missing`: +// - `default_missing_values`: +// +// - `model`: +// Column | Type | Collation | Nullable | Default +// ---------+---------+-----------+----------+--------- +// modelid | integer | | not null | +// name | text | | | +// comment | text | | | +// +// - `model_data`: +// Column | Type | Collation | Nullable | Default +// -----------+-----------------------------+-----------+----------+--------- +// stationid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// paramid | integer | | not null | +// level | integer | | not null | +// modelid | integer | | not null | +// original | double precision | | | +// +// - `param`: part of stinfosys `param` table +// Column | Type | Collation | Nullable | Default +// -------------+---------+-----------+----------+--------- +// paramid | integer | | not null | +// name | text | | not null | +// description | text | | | +// unit | text | | | +// level_scale | integer | | | 0 +// comment | text | | | +// scalar | boolean | | | true +// +// - `pdata`: same as `data` without the `original` column and all `paramid` null??? +// - `station`: station metadata such as (lat, lon, height, name, wmonr, etc) +// - `station_metadata`: this one seems to map well to our `labels.met`? 
+// Problem is `typeid`, `sensor`, and `level` are always NULL +// +// - `text_data`: Similar to `data`, but without QC info nor sensor/level +// +// Column | Type | Collation | Nullable | Default +// -----------+-----------------------------+-----------+----------+--------- +// stationid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// original | text | | not null | +// paramid | integer | | not null | +// tbtime | timestamp without time zone | | not null | +// typeid | integer | | not null | +// +// In `histkvalobs` only data tables seem to be non-empty +// +// IMPORTANT: considerations for migrations to LARD +// - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table +// - (sensor, level) can be NULL, while in Kvalobs they have default values (0,0) +// => POSSIBLE INCONSISTENCY when importing to LARD +// - Timestamps are UTC +// - Kvalobs doesn't have the concept of timeseries ID, +// instead there is a sequential ID associated with each observation row -// Kvalobs observation row +var NULL_VALUES []float64 = []float64{-34767, -34766} + +type DataSeries = []*DataObs + +// Kvalobs data observation row type DataObs struct { Obstime time.Time `db:"obstime"` Original float64 `db:"original"` @@ -25,11 +100,11 @@ type DataObs struct { Cfailed *string `db:"cfailed"` } +type TextSeries = []*TextObs + +// Kvalobs text observation row type TextObs struct { Obstime time.Time `db:"obstime"` Original string `db:"original"` Tbtime time.Time `db:"tbtime"` } - -type Data = []*DataObs -type Text = []*TextObs diff --git a/migrations/kvalobs/main.go b/migrations/kvalobs/main.go index 88e8e9b7..ef60d60b 100644 --- a/migrations/kvalobs/main.go +++ b/migrations/kvalobs/main.go @@ -3,7 +3,6 @@ package kvalobs import ( "fmt" "os" - "time" "github.com/alexflint/go-arg" @@ -11,117 +10,6 @@ import ( port "migrate/kvalobs/import" ) -// Kvalobs is composed of two databases -// 1) `kvalobs` for fresh data -// 2) `histkvalobs` for data older than -// -// Both contain the same tables: -// - `algorithms`: empty (???) - stores procedure info for QC checks -// - `checks`: empty (???) 
-// - `data`: stores numerical observations, associated metadata, and QC info -// -// Column | Type | Collation | Nullable | Default -// -------------+-----------------------------+-----------+----------+---------------------------- -// stationid | integer | | not null | -// obstime | timestamp without time zone | | not null | -// original | double precision | | not null | -// paramid | integer | | not null | -// tbtime | timestamp without time zone | | not null | -// typeid | integer | | not null | -// sensor | character(1) | | | '0'::bpchar -// level | integer | | | 0 -// corrected | double precision | | not null | -// controlinfo | character(16) | | | '0000000000000000'::bpchar -// useinfo | character(16) | | | '0000000000000000'::bpchar -// cfailed | text | | | -// -// - `default_missing`: -// - `default_missing_values`: -// -// - `model`: -// Column | Type | Collation | Nullable | Default -// ---------+---------+-----------+----------+--------- -// modelid | integer | | not null | -// name | text | | | -// comment | text | | | -// -// - `model_data`: -// Column | Type | Collation | Nullable | Default -// -----------+-----------------------------+-----------+----------+--------- -// stationid | integer | | not null | -// obstime | timestamp without time zone | | not null | -// paramid | integer | | not null | -// level | integer | | not null | -// modelid | integer | | not null | -// original | double precision | | | -// -// - `param`: part of stinfosys `param` table -// Column | Type | Collation | Nullable | Default -// -------------+---------+-----------+----------+--------- -// paramid | integer | | not null | -// name | text | | not null | -// description | text | | | -// unit | text | | | -// level_scale | integer | | | 0 -// comment | text | | | -// scalar | boolean | | | true -// -// - `pdata`: same as `data` without the `original` column and all `paramid` null??? -// - `station`: station metadata such as (lat, lon, height, name, wmonr, etc) -// - `station_metadata`: this one seems to map well to our `labels.met`? 
-// Problem is `typeid`, `sensor`, and `level` are always NULL -// -// - `text_data`: Similar to `data`, but without QC info nor sensor/level -// -// Column | Type | Collation | Nullable | Default -// -----------+-----------------------------+-----------+----------+--------- -// stationid | integer | | not null | -// obstime | timestamp without time zone | | not null | -// original | text | | not null | -// paramid | integer | | not null | -// tbtime | timestamp without time zone | | not null | -// typeid | integer | | not null | -// -// In `histkvalobs` only data tables seem to be non-empty -// -// IMPORTANT: considerations for migrations to LARD -// - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table -// - (sensor, level) can be NULL, while in Kvalobs they have default values (0,0) -// => POSSIBLE INCONSISTENCY when importing to LARD -// - Timestamps are UTC -// - Kvalobs doesn't have the concept of timeseries ID, -// instead there is a sequential ID associated with each observation row - -var NULL_VALUES []float64 = []float64{-34767, -34766} - -// type timespan struct { -// fromtime time.Time -// totime time.Time -// } -// -// type Kvalobs struct { -// Hosts []string -// Ports []string -// DBs []string -// Usernames []string -// Passwords []string -// } - -// TODO: should we use this one as default or process all times -var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) - -// type BaseConfig struct { -// BaseDir string `short:"p" long:"path" default:"./dumps" description:"Location the dumped data will be stored in"` -// FromTime *time.Time `long:"from" description:"Fetch data only starting from this timestamp"` -// ToTime *time.Time `long:"to" description:"Fetch data only until this timestamp"` -// Ts []int32 `long:"ts" description:"Optional comma separated list of timeseries. By default all available timeseries are processed"` -// Stations []int32 `long:"station" description:"Optional comma separated list of station numbers. By default all available station numbers are processed"` -// TypeIds []int32 `long:"typeid" description:"Optional comma separated list of type IDs. By default all available type IDs are processed"` -// ParamIds []int32 `long:"paramid" description:"Optional comma separated list of param IDs. By default all available param IDs are processed"` -// Sensors []int32 `long:"sensor" description:"Optional comma separated list of sensors. By default all available sensors are processed"` -// Levels []int32 `long:"level" description:"Optional comma separated list of levels. 
By default all available levels are processed"` -// } - type Cmd struct { Dump *dump.Config `arg:"subcommand" help:"Dump tables from Kvalobs to CSV"` Import *port.Config `arg:"subcommand" help:"Import CSV file dumped from Kvalobs"` From 8112708f9283d31b7f81c45f6045560e1bf98f99 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 27 Nov 2024 13:08:30 +0100 Subject: [PATCH 12/67] Update function name --- migrations/kvalobs/db/labels.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go index 273086d9..78ad586e 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -21,7 +21,7 @@ func LabelToFilename(ts *lard.Label) string { return fmt.Sprintf("%v_%v_%v_%v_%v.csv", ts.StationID, ts.TypeID, ts.ParamID, sensor, level) } -func parseFilename(s *string) (*int32, error) { +func parseFilenameFields(s *string) (*int32, error) { // TODO: probably there is a better way to do this without defining a gazillion functions if *s == "" { return nil, nil @@ -37,6 +37,7 @@ func parseFilename(s *string) (*int32, error) { // Deserialize filename to lard.Label func LabelFromFilename(filename string) (*lard.Label, error) { name := strings.TrimSuffix(filename, ".csv") + fields := strings.Split(name, "_") if len(fields) < 5 { return nil, errors.New("Too few fields in file name: " + filename) @@ -47,7 +48,7 @@ func LabelFromFilename(filename string) (*lard.Label, error) { ptrs[i] = &fields[i] } - converted, err := utils.TryMap(ptrs, parseFilename) + converted, err := utils.TryMap(ptrs, parseFilenameFields) if err != nil { return nil, err } From ba7b31942ef55d4ca59ef89a7bc8a45bdb7e4dd4 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 27 Nov 2024 16:45:41 +0100 Subject: [PATCH 13/67] Change GetTimeseriesID signature --- migrations/kdvh/import/cache/main.go | 2 +- migrations/lard/timeseries.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index c021ec2a..2183e27b 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -98,7 +98,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo Level: param.Hlevel, } - tsid, err := lard.GetTimeseriesID(&label, param.Fromtime, pool) + tsid, err := lard.GetTimeseriesID(&label, ¶m.Fromtime, pool) if err != nil { slog.Error(logstr + "could not obtain timeseries - " + err.Error()) return nil, err diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index 73f24b2a..a67f3828 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -16,7 +16,7 @@ type Label struct { Level *int32 } -func GetTimeseriesID(label *Label, fromtime time.Time, pool *pgxpool.Pool) (tsid int32, err error) { +func GetTimeseriesID(label *Label, fromtime *time.Time, pool *pgxpool.Pool) (tsid int32, err error) { // Query LARD labels table err = pool.QueryRow( context.TODO(), From b600490ff6a0a7bedbbcfbf9d4b7c4e092bc4f07 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 27 Nov 2024 16:45:58 +0100 Subject: [PATCH 14/67] Generic overload --- migrations/kvalobs/config_test.go | 34 ++++---- migrations/kvalobs/db/base_config.go | 17 ++-- migrations/kvalobs/db/csv.go | 45 ++++++++-- migrations/kvalobs/db/labels.go | 45 +++++++--- migrations/kvalobs/db/main.go | 5 ++ migrations/kvalobs/dump/data.go | 80 +++-------------- migrations/kvalobs/dump/dump.go | 123 +++++++++++++++++++++++++++ 
migrations/kvalobs/dump/labels.go | 35 -------- migrations/kvalobs/dump/main.go | 78 ++++++++++++----- migrations/kvalobs/dump/text.go | 113 +++--------------------- migrations/kvalobs/import/import.go | 49 ----------- migrations/kvalobs/import/main.go | 2 +- migrations/kvalobs/import/text.go | 6 +- migrations/utils/timestamp.go | 8 ++ 14 files changed, 325 insertions(+), 315 deletions(-) create mode 100644 migrations/kvalobs/dump/dump.go delete mode 100644 migrations/kvalobs/dump/labels.go delete mode 100644 migrations/kvalobs/import/import.go diff --git a/migrations/kvalobs/config_test.go b/migrations/kvalobs/config_test.go index 79b756ac..5fa65d34 100644 --- a/migrations/kvalobs/config_test.go +++ b/migrations/kvalobs/config_test.go @@ -1,51 +1,51 @@ package kvalobs import ( - "migrate/kvalobs/db" - "migrate/lard" "testing" + + "migrate/kvalobs/db" ) func TestShouldProcessLabel(t *testing.T) { - type TestCase struct { + type TestCase[T string] struct { tag string - label lard.Label - config db.BaseConfig + label db.Label[T] + config db.BaseConfig[T] expected bool } - cases := []TestCase{ + cases := []TestCase[string]{ { tag: "empty config", - label: lard.Label{StationID: 18700}, - config: db.BaseConfig{}, + label: db.Label[string]{StationID: 18700}, + config: db.BaseConfig[string]{}, expected: true, }, { tag: "station specified", - label: lard.Label{StationID: 18700}, - config: db.BaseConfig{Stations: []int32{18700}}, + label: db.Label[string]{StationID: 18700}, + config: db.BaseConfig[string]{Stations: []int32{18700}}, expected: true, }, { tag: "station not in label", - label: lard.Label{StationID: 18700}, - config: db.BaseConfig{Stations: []int32{20000}}, + label: db.Label[string]{StationID: 18700}, + config: db.BaseConfig[string]{Stations: []int32{20000}}, expected: false, }, { tag: "label without level", - label: lard.Label{}, - config: db.BaseConfig{Levels: []int32{2}}, + label: db.Label[string]{}, + config: db.BaseConfig[string]{Levels: []int32{2}}, expected: false, }, { tag: "valid level", - label: func() lard.Label { + label: func() db.Label[string] { var level int32 = 2 - return lard.Label{Level: &level} + return db.Label[string]{Level: &level} }(), - config: db.BaseConfig{Levels: []int32{2}}, + config: db.BaseConfig[string]{Levels: []int32{2}}, expected: true, }, } diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go index 5b1d0b2b..37ff5537 100644 --- a/migrations/kvalobs/db/base_config.go +++ b/migrations/kvalobs/db/base_config.go @@ -3,26 +3,25 @@ package db import ( "time" - "migrate/lard" "migrate/utils" ) // TODO: should we use this one as default or process all times +// TODO: it looks like histkvalobs has data only starting from 2023-06-01? var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) -type BaseConfig struct { +type BaseConfig[T int32 | string] struct { Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` - // Ts []int32 `long:"ts" help:"Optional comma separated list of timeseries. 
By default all available timeseries are processed"` - Stations []int32 `help:"Optional space separated list of station numbers"` - TypeIds []int32 `help:"Optional space separated list of type IDs"` - ParamIds []int32 `help:"Optional space separated list of param IDs"` - Sensors []int32 `help:"Optional space separated list of sensors"` - Levels []int32 `help:"Optional space separated list of levels"` + Stations []int32 `help:"Optional space separated list of station numbers"` + TypeIds []int32 `help:"Optional space separated list of type IDs"` + ParamIds []int32 `help:"Optional space separated list of param IDs"` + Sensors []T `help:"Optional space separated list of sensors"` + Levels []int32 `help:"Optional space separated list of levels"` } -func (config *BaseConfig) ShouldProcessLabel(label *lard.Label) bool { +func (config *BaseConfig[T]) ShouldProcessLabel(label *Label[T]) bool { // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || return utils.Contains(config.Stations, label.StationID) && utils.Contains(config.TypeIds, label.TypeID) && diff --git a/migrations/kvalobs/db/csv.go b/migrations/kvalobs/db/csv.go index 042cea12..ae385c5a 100644 --- a/migrations/kvalobs/db/csv.go +++ b/migrations/kvalobs/db/csv.go @@ -9,17 +9,16 @@ import ( "github.com/gocarina/gocsv" ) -func ReadLabelCSV(filename string) (labels []*lard.Label, err error) { +func ReadLabelCSV(filename string) (labels []*Label[string], err error) { file, err := os.Open(filename) if err != nil { - slog.Error(err.Error()) return nil, err } defer file.Close() // TODO: maybe I should preallocate slice size if I can? - err = gocsv.UnmarshalFile(file, labels) - return labels, nil + err = gocsv.UnmarshalFile(file, &labels) + return labels, err } func ReadDataCSV(tsid int32, filename string) ([][]any, error) { @@ -42,7 +41,7 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, error) { return nil, err } - // Kvalobs does not have IDs so we have to bootstrap it here + // Kvalobs does not have IDs so we have to add it here obs.Id = tsid row := obs.ToRow() @@ -72,7 +71,7 @@ func ReadTextCSV(tsid int32, filename string) ([][]any, error) { return nil, err } - // Kvalobs does not have IDs so we have to bootstrap it here + // Kvalobs does not have IDs so we have to add it here obs.Id = tsid row := obs.ToRow() @@ -81,3 +80,37 @@ func ReadTextCSV(tsid int32, filename string) ([][]any, error) { return data, nil } + +type Rower interface { + ToRow() []any +} + +func ReadSeriesCSV[T Rower](tsid int32, filename string) ([][]any, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + defer file.Close() + + reader := bufio.NewScanner(file) + + // TODO: maybe I should preallocate slice size if I can? 
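+	// NOTE: the tsid parameter is unused in this generic version, because Go
+	// does not allow assigning to a field through a type parameter (hence the
+	// commented-out `obs.Id = tsid` below). A possible workaround would be to
+	// add a setter method to the Rower constraint, or to let the caller attach
+	// the ID when consuming the returned rows.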
+ var data [][]any + for reader.Scan() { + var obs T + + err = gocsv.UnmarshalString(reader.Text(), &obs) + if err != nil { + return nil, err + } + + // Kvalobs does not have IDs so we have to add it here + // obs.Id = tsid + + row := obs.ToRow() + data = append(data, row) + } + + return data, nil +} diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go index 78ad586e..fa2e5051 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -3,22 +3,47 @@ package db import ( "errors" "fmt" - "migrate/lard" "migrate/utils" "strconv" "strings" ) -// Serialize lard.Label to CSV file name -func LabelToFilename(ts *lard.Label) string { +// Kvalobs specific label +type Label[T int32 | string] struct { + StationID int32 + TypeID int32 + ParamID int32 + // These two are not present in the `text_data` tabl + Sensor *T // bpchar(1) in `data` table + Level *int32 +} + +func (l *Label[T]) sensorLevelString() (string, string) { var sensor, level string - if ts.Sensor != nil { - sensor = fmt.Sprint(ts.Sensor) + if l.Sensor != nil { + sensor = fmt.Sprint(*l.Sensor) } - if ts.Level != nil { - level = fmt.Sprint(ts.Level) + if l.Level != nil { + level = fmt.Sprint(*l.Level) } - return fmt.Sprintf("%v_%v_%v_%v_%v.csv", ts.StationID, ts.TypeID, ts.ParamID, sensor, level) + return sensor, level +} + +func (l *Label[T]) ToFilename() string { + sensor, level := l.sensorLevelString() + return fmt.Sprintf("%v_%v_%v_%v_%v.csv", l.StationID, l.TypeID, l.ParamID, sensor, level) +} + +func (l *Label[T]) ToString() string { + sensor, level := l.sensorLevelString() + return fmt.Sprintf( + "%v - %v - %v - %v - %v", + l.StationID, + l.ParamID, + l.TypeID, + sensor, + level, + ) } func parseFilenameFields(s *string) (*int32, error) { @@ -35,7 +60,7 @@ func parseFilenameFields(s *string) (*int32, error) { } // Deserialize filename to lard.Label -func LabelFromFilename(filename string) (*lard.Label, error) { +func LabelFromFilename(filename string) (*Label[int32], error) { name := strings.TrimSuffix(filename, ".csv") fields := strings.Split(name, "_") @@ -53,7 +78,7 @@ func LabelFromFilename(filename string) (*lard.Label, error) { return nil, err } - return &lard.Label{ + return &Label[int32]{ StationID: *converted[0], TypeID: *converted[1], ParamID: *converted[2], diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 53e4997f..1410f0be 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -108,3 +108,8 @@ type TextObs struct { Original string `db:"original"` Tbtime time.Time `db:"tbtime"` } + +type Kvalobs struct { + Name string + ConnEnvVar string +} diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index 6b31be12..8e947181 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -3,64 +3,14 @@ package dump import ( "context" "log/slog" - "migrate/kvalobs/db" - "migrate/lard" - "os" - "path/filepath" - "github.com/gocarina/gocsv" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" -) - -const DATA_LABEL_CSV string = "data_labels.csv" - -func (config *Config) dumpData(outpath string, pool *pgxpool.Pool) { - var labels []*lard.Label - - dataPath := filepath.Join(outpath, "data") - if err := os.MkdirAll(dataPath, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - labelFile := filepath.Join(outpath, DATA_LABEL_CSV) - if _, err := os.Stat(outpath); err != nil { - if labels, err = dumpLabels(pool, labelFile, getDataLabels, config); err != nil { 
- return - } - } else { - if labels, err = db.ReadLabelCSV(labelFile); err != nil { - return - } - } - for _, ts := range labels { - if !config.ShouldDumpLabel(ts) { - continue - } - - data, err := readData(ts, pool, config) - if err != nil { - continue - } - - filename := filepath.Join(dataPath, db.LabelToFilename(ts)) - file, err := os.Create(filename) - if err != nil { - slog.Error(err.Error()) - continue - } - - slog.Info("Writing data to " + filename) - if err = gocsv.MarshalFile(data, file); err != nil { - slog.Error(err.Error()) - continue - } - } -} + "migrate/kvalobs/db" +) -func getDataLabels(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) { +func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], error) { // TODO: not sure about the sensor/level conditions, // they should never be NULL since they have default values different from NULL? // TODO: We probably don't even need the join, @@ -75,22 +25,23 @@ func getDataLabels(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) { query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` - rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) + slog.Info("Querying data labels...") + rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) if err != nil { - slog.Error(err.Error()) return nil, err } - tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[*lard.Label]) + slog.Info("Collecting data labels...") + labels := make([]*db.Label[string], 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label[string]]) if err != nil { - slog.Error(err.Error()) return nil, err } - return tsList, nil + return labels, nil } -func readData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Data, error) { +func getDataSeries(label *db.Label[string], timespan *TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { // TODO: is the case useful here, we can just check for cfailed = '' in here // query := `SELECT // obstime, @@ -125,8 +76,7 @@ func readData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Data, e AND level = $5 AND ($6::timestamp IS NULL OR obstime >= $6) AND ($7::timestamp IS NULL OR obstime < $7) - ORDER BY - stationid, obstime` + ORDER BY obstime` rows, err := pool.Query( context.TODO(), @@ -136,17 +86,15 @@ func readData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Data, e label.ParamID, label.Sensor, label.Level, - config.FromTime, - config.ToTime, + timespan.From, + timespan.To, ) if err != nil { - slog.Error(err.Error()) return nil, err } - data, err := pgx.CollectRows(rows, pgx.RowToStructByName[*db.DataObs]) + data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.DataObs]) if err != nil { - slog.Error(err.Error()) return nil, err } diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go new file mode 100644 index 00000000..c2f1184e --- /dev/null +++ b/migrations/kvalobs/dump/dump.go @@ -0,0 +1,123 @@ +package dump + +import ( + "context" + "fmt" + "log/slog" + "os" + "path/filepath" + + "github.com/gocarina/gocsv" + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/kvalobs/db" + "migrate/utils" +) + +func writeLabels[T int32 | string](path string, labels []*db.Label[T]) error { + file, err := os.Create(path) + if err != nil { + return err + } + + slog.Info("Writing timeseries labels to " + path) + if 
err = gocsv.Marshal(labels, file); err != nil { + return err + } + + return nil +} + +func writeSeries[T int32 | string, S db.DataSeries | db.TextSeries](series S, path, table string, label *db.Label[T]) error { + filename := filepath.Join(path, label.ToFilename()) + file, err := os.Create(filename) + if err != nil { + return err + } + + slog.Info(fmt.Sprintf("Writing %s observations to '%s'", table, filename)) + if err = gocsv.MarshalFile(series, file); err != nil { + slog.Error(err.Error()) + return err + } + + return nil +} + +// TODO: switch to log file +func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[string, S], pool *pgxpool.Pool, config *Config) { + var labels []*db.Label[string] + + timespan := config.TimeSpan() + + labelFile := filepath.Join(path, table.Name+"_labels.csv") + if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { + labels, err = table.LabelFn(timespan, pool) + if err != nil { + slog.Error(err.Error()) + return + } + if err = writeLabels(labelFile, labels); err != nil { + slog.Error(err.Error()) + return + } + } else { + if labels, err = db.ReadLabelCSV(labelFile); err != nil { + slog.Error(err.Error()) + return + } + } + + // TODO: this bar is a bit deceiving if you don't dump all the labels + bar := utils.NewBar(len(labels), path) + + path = filepath.Join(path, table.Name) + if err := os.MkdirAll(path, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + for _, label := range labels { + bar.Add(1) + + if !config.ShouldProcessLabel(label) { + continue + } + + series, err := table.ObsFn(label, timespan, pool) + if err != nil { + slog.Error(err.Error()) + continue + } + + if err := writeSeries(series, path, table.Name, label); err != nil { + slog.Error(err.Error()) + continue + } + + slog.Info(label.ToString() + ": dumped successfully") + } +} + +func dumpDB(database DB, dataTable Table[string, db.DataSeries], textTable Table[string, db.TextSeries], config *Config) { + pool, err := pgxpool.New(context.Background(), os.Getenv(database.ConnEnvVar)) + if err != nil { + slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) + return + } + defer pool.Close() + + path := filepath.Join(config.Path, database.Name) + if err := os.MkdirAll(path, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + + if config.ChosenTable(dataTable.Name) { + dumpTable(path, dataTable, pool, config) + } + + if config.ChosenTable(textTable.Name) { + dumpTable(path, textTable, pool, config) + } +} diff --git a/migrations/kvalobs/dump/labels.go b/migrations/kvalobs/dump/labels.go deleted file mode 100644 index c6d313f0..00000000 --- a/migrations/kvalobs/dump/labels.go +++ /dev/null @@ -1,35 +0,0 @@ -package dump - -import ( - "log/slog" - "migrate/lard" - "os" - - "github.com/gocarina/gocsv" - "github.com/jackc/pgx/v5/pgxpool" -) - -// Function used to du -type LabelDumpFunc = func(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) - -func dumpLabels(pool *pgxpool.Pool, path string, fn LabelDumpFunc, config *Config) ([]*lard.Label, error) { - labels, err := fn(pool, config) - if err != nil { - // Error logged inside fn - return nil, err - } - - file, err := os.Create(path) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - slog.Info("Writing timeseries labels to " + path) - if err = gocsv.Marshal(labels, file); err != nil { - slog.Error(err.Error()) - return nil, err - } - - return labels, nil -} diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 
b063d72e..557bb646 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -1,11 +1,7 @@ package dump import ( - "context" - "fmt" - "log/slog" - "os" - "path/filepath" + "time" "github.com/jackc/pgx/v5/pgxpool" @@ -23,28 +19,68 @@ import ( // TODO: not sure what to do with this one // func joinTS(first, second []lard.Label) +type Table[T int32 | string, S db.DataSeries | db.TextSeries] struct { + Name string + LabelFn LabelFunc[T] + ObsFn ObsFunc[T, S] +} + +// Function used to query labels from kvalobs given an optional timespan +type LabelFunc[T int32 | string] func(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[T], error) + +// Function used to query timeseries from kvalobs for a specific label +type ObsFunc[T int32 | string, S db.DataSeries | db.TextSeries] func(label *db.Label[T], timespan *TimeSpan, pool *pgxpool.Pool) (S, error) + +type DB struct { + Name string + ConnEnvVar string +} + type Config struct { - db.BaseConfig - UpdateLabels bool `help:"Overwrites the label CSV files"` + db.BaseConfig[string] + UpdateLabels bool `help:"Overwrites the label CSV files"` + Database string `arg:"--db" help:"Which database to dump from. Choices: ['kvalobs', 'histkvalobs']"` + Table string `help:"Which table to dump. Choices: ['data', 'text']"` } -func (config *Config) Execute() { - // dump kvalobs - config.dump("KVALOBS_CONN_STRING", filepath.Join(config.Path, "kvalobs")) +type TimeSpan struct { + From *time.Time + To *time.Time +} + +func (config *Config) TimeSpan() *TimeSpan { + return &TimeSpan{From: config.FromTime.Inner(), To: config.ToTime.Inner()} +} - // dump histkvalobs - // TODO: maybe it's worth adding a separate flag? - config.dump("HISTKVALOBS_CONN_STRING", filepath.Join(config.Path, "histkvalobs")) +func (config *Config) ChosenDB(name string) bool { + return config.Database == "" || config.Database == name } -func (config *Config) dump(envvar, path string) { - pool, err := pgxpool.New(context.Background(), os.Getenv(envvar)) - if err != nil { - slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) - return +func (config *Config) ChosenTable(name string) bool { + return config.Table == "" || config.Table == name +} + +func (config *Config) Execute() { + kvalobs := DB{Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING"} + histkvalobs := DB{Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING"} + + dataTable := Table[string, db.DataSeries]{ + Name: "data", + LabelFn: getDataLabels, + ObsFn: getDataSeries, } - defer pool.Close() - dumpText(path, pool, config) - config.dumpData(path, pool) + textTable := Table[string, db.TextSeries]{ + Name: "text", + LabelFn: getTextLabels, + ObsFn: getTextSeries, + } + + if config.ChosenDB(kvalobs.Name) { + dumpDB(kvalobs, dataTable, textTable, config) + } + + if config.ChosenDB(histkvalobs.Name) { + dumpDB(histkvalobs, dataTable, textTable, config) + } } diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 0509d6f0..d84b970a 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -3,95 +3,14 @@ package dump import ( "context" "log/slog" - "migrate/kvalobs/db" - "migrate/lard" - "os" - "path/filepath" - "github.com/gocarina/gocsv" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" -) - -func dumpText(path string, pool *pgxpool.Pool, config *Config) { - var labels []*lard.Label - - textPath := filepath.Join(path, "text") - if err := os.MkdirAll(textPath, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - 
labelFile := filepath.Join(path, "labels.csv") - if _, err := os.Stat(labelFile); err != nil { - if labels, err = dumpLabels(pool, labelFile, getTextLabels, config); err != nil { - return - } - } else { - if labels, err = db.ReadLabelCSV(labelFile); err != nil { - return - } - } - - for _, ts := range labels { - if !config.ShouldDumpLabel(ts) { - continue - } - - // TODO: Dump per station? Not strictly necessary? But makes it more organized? - stationDir := filepath.Join(textPath, string(ts.StationID)) - if err := os.MkdirAll(stationDir, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - data, err := readTextData(ts, pool, config) - if err != nil { - continue - } - - filename := filepath.Join(textPath, string(ts.StationID), db.LabelToFilename(ts)) - file, err := os.Create(filename) - if err != nil { - slog.Error(err.Error()) - continue - } - - slog.Info("Writing text to " + filename) - if err = gocsv.MarshalFile(data, file); err != nil { - slog.Error(err.Error()) - continue - } - } -} - -func (config *Config) dumpTextTS(pool *pgxpool.Pool) { - timeseries, err := getTextLabels(pool, config) - if err != nil { - // Error logged inside getTextTS - return - } - if err := os.MkdirAll(config.Path, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } - - path := filepath.Join(config.Path, "text_timeseries.csv") - file, err := os.Create(path) - if err != nil { - slog.Error(err.Error()) - return - } - - slog.Info("Writing timeseries labels to CSV") - if err = gocsv.Marshal(timeseries, file); err != nil { - slog.Error(err.Error()) - return - } -} + "migrate/kvalobs/db" +) -func getTextLabels(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) { +func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], error) { // OGquery := `SELECT DISTINCT // stationid, // typeid, @@ -130,24 +49,23 @@ func getTextLabels(pool *pgxpool.Pool, config *Config) ([]*lard.Label, error) { query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level FROM text_data WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` - slog.Info("Querying distinct timeseries labels") - rows, err := pool.Query(context.TODO(), query, config.FromTime, config.ToTime) + slog.Info("Querying text labels...") + rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) if err != nil { - slog.Error(err.Error()) return nil, err } - slog.Info("Collecting rows to slice") - tsList, err := pgx.CollectRows(rows, pgx.RowToStructByName[*lard.Label]) + slog.Info("Collecting text labels...") + labels := make([]*db.Label[string], 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label[string]]) if err != nil { - slog.Error(err.Error()) return nil, err } - return tsList, nil + return labels, nil } -func readTextData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Text, error) { +func getTextSeries(label *db.Label[string], timespan *TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { // query := ` // SELECT // obstime, @@ -171,8 +89,7 @@ func readTextData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Tex AND paramid = $3 AND ($4::timestamp IS NULL OR obstime >= $4) AND ($5::timestamp IS NULL OR obstime < $5) - ORDER BY - stationid, obstime` + ORDER BY obstime` rows, err := pool.Query( context.TODO(), @@ -180,17 +97,15 @@ func readTextData(label *lard.Label, pool *pgxpool.Pool, config *Config) (db.Tex label.StationID, label.TypeID, 
label.ParamID, - config.FromTime, - config.ToTime, + timespan.From, + timespan.To, ) if err != nil { - slog.Error(err.Error()) return nil, err } - data, err := pgx.CollectRows(rows, pgx.RowToStructByName[*db.TextObs]) + data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.TextObs]) if err != nil { - slog.Error(err.Error()) return nil, err } diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go deleted file mode 100644 index de491e83..00000000 --- a/migrations/kvalobs/import/import.go +++ /dev/null @@ -1,49 +0,0 @@ -package port - -import ( - "context" - - "fmt" - "log/slog" - "os" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - - "migrate/lard" - "migrate/utils" -) - -type Config struct { - Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` - FromTime *time.Time `arg:"--from" help:"Fetch data only starting from this timestamp"` - ToTime *time.Time `arg:"--to" help:"Fetch data only until this timestamp"` - Ts []int32 `help:"Optional space separated list of timeseries."` - Stations []int32 `help:"Optional space separated list of station numbers."` - TypeIds []int32 `help:"Optional space separated list of type IDs."` - ParamIds []int32 `help:"Optional space separated list of param IDs."` - Sensors []int32 `help:"Optional space separated list of sensors."` - Levels []int32 `help:"Optional space separated list of levels."` -} - -func (config *Config) ShouldImport(ts *lard.Label) bool { - // TODO: there's no need to get the tsid if the other parameters don't match - // So extract the first condition - // return contains(config.Ts, tsid) || - return utils.Contains(config.Stations, ts.StationID) || - utils.Contains(config.TypeIds, ts.TypeID) || - utils.Contains(config.ParamIds, ts.ParamID) || - // TODO: these two should never be null anyway - utils.NullableContains(config.Sensors, ts.Sensor) || - utils.NullableContains(config.Levels, ts.Level) -} - -func (config *Config) Execute() error { - pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) - if err != nil { - slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) - } - defer pool.Close() - - return nil -} diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index b372b787..1e5c8557 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -13,7 +13,7 @@ import ( ) type Config struct { - db.BaseConfig + db.BaseConfig[int32] Ts []int32 `help:"Optional space separated list of timeseries."` } diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go index 4645236a..a56d51e5 100644 --- a/migrations/kvalobs/import/text.go +++ b/migrations/kvalobs/import/text.go @@ -25,11 +25,13 @@ func (config *Config) ImportText(pool *pgxpool.Pool, path string) error { continue } - if !config.ShouldImport(label) { + if !config.ShouldProcessLabel(label) { continue } - tsid, err := lard.GetTimeseriesID(label, *config.FromTime, pool) + // FIXME: FromTime can be nil + lardLabel := lard.Label(*label) + tsid, err := lard.GetTimeseriesID(&lardLabel, config.FromTime.Inner(), pool) if err != nil { slog.Error(err.Error()) continue diff --git a/migrations/utils/timestamp.go b/migrations/utils/timestamp.go index cf1e1498..195e5550 100644 --- a/migrations/utils/timestamp.go +++ b/migrations/utils/timestamp.go @@ -21,3 +21,11 @@ func (ts *Timestamp) UnmarshalText(b []byte) error { func (ts *Timestamp) Format(layout string) string { return ts.t.Format(layout) } + +func (ts 
*Timestamp) Inner() *time.Time { + if ts == nil { + return nil + } + + return &ts.t +} From 471d7aba77690e5da8b5863bce05a0b1f0b55c58 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 09:40:05 +0100 Subject: [PATCH 15/67] Add some type alias --- migrations/kvalobs/db/csv.go | 2 +- migrations/kvalobs/db/labels.go | 10 +++++++--- migrations/kvalobs/dump/data.go | 8 ++++---- migrations/kvalobs/dump/dump.go | 10 +++++----- migrations/kvalobs/dump/main.go | 15 ++++++++------- migrations/kvalobs/dump/text.go | 8 ++++---- 6 files changed, 29 insertions(+), 24 deletions(-) diff --git a/migrations/kvalobs/db/csv.go b/migrations/kvalobs/db/csv.go index ae385c5a..3ef72c1e 100644 --- a/migrations/kvalobs/db/csv.go +++ b/migrations/kvalobs/db/csv.go @@ -9,7 +9,7 @@ import ( "github.com/gocarina/gocsv" ) -func ReadLabelCSV(filename string) (labels []*Label[string], err error) { +func ReadLabelCSV(filename string) (labels []*KvLabel, err error) { file, err := os.Open(filename) if err != nil { return nil, err diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go index fa2e5051..11189cd5 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -18,6 +18,10 @@ type Label[T int32 | string] struct { Level *int32 } +// Can be directly casted to lard.Label +type LardLabel = Label[int32] +type KvLabel = Label[string] + func (l *Label[T]) sensorLevelString() (string, string) { var sensor, level string if l.Sensor != nil { @@ -59,8 +63,8 @@ func parseFilenameFields(s *string) (*int32, error) { return &out, nil } -// Deserialize filename to lard.Label -func LabelFromFilename(filename string) (*Label[int32], error) { +// Deserialize filename to LardLabel +func LabelFromFilename(filename string) (*LardLabel, error) { name := strings.TrimSuffix(filename, ".csv") fields := strings.Split(name, "_") @@ -78,7 +82,7 @@ func LabelFromFilename(filename string) (*Label[int32], error) { return nil, err } - return &Label[int32]{ + return &LardLabel{ StationID: *converted[0], TypeID: *converted[1], ParamID: *converted[2], diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index 8e947181..17c241dc 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -10,7 +10,7 @@ import ( "migrate/kvalobs/db" ) -func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], error) { +func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { // TODO: not sure about the sensor/level conditions, // they should never be NULL since they have default values different from NULL? 
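	// (For reference: in the `data` table `sensor` defaults to '0'::bpchar and
	// `level` defaults to 0, so NULLs should only occur for rows that were
	// inserted with explicit NULL values.)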
// TODO: We probably don't even need the join, @@ -32,8 +32,8 @@ func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], } slog.Info("Collecting data labels...") - labels := make([]*db.Label[string], 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label[string]]) + labels := make([]*db.KvLabel, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.KvLabel]) if err != nil { return nil, err } @@ -41,7 +41,7 @@ func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], return labels, nil } -func getDataSeries(label *db.Label[string], timespan *TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { +func getDataSeries(label *db.KvLabel, timespan *TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { // TODO: is the case useful here, we can just check for cfailed = '' in here // query := `SELECT // obstime, diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index c2f1184e..87085d9d 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -14,7 +14,7 @@ import ( "migrate/utils" ) -func writeLabels[T int32 | string](path string, labels []*db.Label[T]) error { +func writeLabels(path string, labels []*db.KvLabel) error { file, err := os.Create(path) if err != nil { return err @@ -28,7 +28,7 @@ func writeLabels[T int32 | string](path string, labels []*db.Label[T]) error { return nil } -func writeSeries[T int32 | string, S db.DataSeries | db.TextSeries](series S, path, table string, label *db.Label[T]) error { +func writeSeries[S db.DataSeries | db.TextSeries](series S, path, table string, label *db.KvLabel) error { filename := filepath.Join(path, label.ToFilename()) file, err := os.Create(filename) if err != nil { @@ -45,8 +45,8 @@ func writeSeries[T int32 | string, S db.DataSeries | db.TextSeries](series S, pa } // TODO: switch to log file -func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[string, S], pool *pgxpool.Pool, config *Config) { - var labels []*db.Label[string] +func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], pool *pgxpool.Pool, config *Config) { + var labels []*db.KvLabel timespan := config.TimeSpan() @@ -99,7 +99,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[string, } } -func dumpDB(database DB, dataTable Table[string, db.DataSeries], textTable Table[string, db.TextSeries], config *Config) { +func dumpDB(database DB, dataTable Table[db.DataSeries], textTable Table[db.TextSeries], config *Config) { pool, err := pgxpool.New(context.Background(), os.Getenv(database.ConnEnvVar)) if err != nil { slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 557bb646..07204427 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -19,17 +19,17 @@ import ( // TODO: not sure what to do with this one // func joinTS(first, second []lard.Label) -type Table[T int32 | string, S db.DataSeries | db.TextSeries] struct { +type Table[S db.DataSeries | db.TextSeries] struct { Name string - LabelFn LabelFunc[T] - ObsFn ObsFunc[T, S] + LabelFn LabelFunc + ObsFn ObsFunc[S] } // Function used to query labels from kvalobs given an optional timespan -type LabelFunc[T int32 | string] func(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[T], error) +type LabelFunc func(timespan *TimeSpan, 
pool *pgxpool.Pool) ([]*db.KvLabel, error) // Function used to query timeseries from kvalobs for a specific label -type ObsFunc[T int32 | string, S db.DataSeries | db.TextSeries] func(label *db.Label[T], timespan *TimeSpan, pool *pgxpool.Pool) (S, error) +type ObsFunc[S db.DataSeries | db.TextSeries] func(label *db.KvLabel, timespan *TimeSpan, pool *pgxpool.Pool) (S, error) type DB struct { Name string @@ -41,6 +41,7 @@ type Config struct { UpdateLabels bool `help:"Overwrites the label CSV files"` Database string `arg:"--db" help:"Which database to dump from. Choices: ['kvalobs', 'histkvalobs']"` Table string `help:"Which table to dump. Choices: ['data', 'text']"` + MaxConn int `arg:"-n" default:"4" help:"Max number of concurrent connections allowed to KDVH"` } type TimeSpan struct { @@ -64,13 +65,13 @@ func (config *Config) Execute() { kvalobs := DB{Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING"} histkvalobs := DB{Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING"} - dataTable := Table[string, db.DataSeries]{ + dataTable := Table[db.DataSeries]{ Name: "data", LabelFn: getDataLabels, ObsFn: getDataSeries, } - textTable := Table[string, db.TextSeries]{ + textTable := Table[db.TextSeries]{ Name: "text", LabelFn: getTextLabels, ObsFn: getTextSeries, diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index d84b970a..ccae51b3 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -10,7 +10,7 @@ import ( "migrate/kvalobs/db" ) -func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], error) { +func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { // OGquery := `SELECT DISTINCT // stationid, // typeid, @@ -56,8 +56,8 @@ func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], } slog.Info("Collecting text labels...") - labels := make([]*db.Label[string], 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label[string]]) + labels := make([]*db.KvLabel, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.KvLabel]) if err != nil { return nil, err } @@ -65,7 +65,7 @@ func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.Label[string], return labels, nil } -func getTextSeries(label *db.Label[string], timespan *TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { +func getTextSeries(label *db.KvLabel, timespan *TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { // query := ` // SELECT // obstime, From 56c491a9a9c4eede697672fb117a0f4c5f68827f Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 09:43:44 +0100 Subject: [PATCH 16/67] Dump one directory per station and use multiple connections --- migrations/kvalobs/dump/data.go | 3 +- migrations/kvalobs/dump/dump.go | 57 +++++++++++++++++++++++---------- migrations/kvalobs/dump/text.go | 3 +- 3 files changed, 44 insertions(+), 19 deletions(-) diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index 17c241dc..00df07e6 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -23,7 +23,8 @@ func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error // AND ($1::timestamp IS NULL OR obstime >= $1) // AND ($2::timestamp IS NULL OR obstime < $2)` query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data - WHERE ($1::timestamp IS NULL OR obstime >= $1) AND 
($2::timestamp IS NULL OR obstime < $2)` + WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid` slog.Info("Querying data labels...") rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 87085d9d..27391350 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -6,6 +6,7 @@ import ( "log/slog" "os" "path/filepath" + "sync" "github.com/gocarina/gocsv" "github.com/jackc/pgx/v5/pgxpool" @@ -44,7 +45,6 @@ func writeSeries[S db.DataSeries | db.TextSeries](series S, path, table string, return nil } -// TODO: switch to log file func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], pool *pgxpool.Pool, config *Config) { var labels []*db.KvLabel @@ -68,35 +68,58 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo } } + path = filepath.Join(path, table.Name) + utils.SetLogFile(path, "dump") + // TODO: this bar is a bit deceiving if you don't dump all the labels + // Maybe should only cache the ones requested from cli? bar := utils.NewBar(len(labels), path) - path = filepath.Join(path, table.Name) - if err := os.MkdirAll(path, os.ModePerm); err != nil { - slog.Error(err.Error()) - return - } + // Used to limit connections to the database + semaphore := make(chan struct{}, config.MaxConn) + var wg sync.WaitGroup + var stationPath string for _, label := range labels { bar.Add(1) - if !config.ShouldProcessLabel(label) { - continue - } - - series, err := table.ObsFn(label, timespan, pool) - if err != nil { - slog.Error(err.Error()) - continue + thisPath := filepath.Join(path, fmt.Sprint(label.StationID)) + if thisPath != stationPath { + stationPath = thisPath + if err := os.MkdirAll(stationPath, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } } - if err := writeSeries(series, path, table.Name, label); err != nil { - slog.Error(err.Error()) + if !config.ShouldProcessLabel(label) { continue } - slog.Info(label.ToString() + ": dumped successfully") + wg.Add(1) + semaphore <- struct{}{} + go func() { + defer func() { + wg.Done() + // Release semaphore + <-semaphore + }() + + series, err := table.ObsFn(label, timespan, pool) + if err != nil { + slog.Error(err.Error()) + return + } + + if err := writeSeries(series, stationPath, table.Name, label); err != nil { + slog.Error(err.Error()) + return + } + + slog.Info(label.ToString() + ": dumped successfully") + }() } + wg.Wait() } func dumpDB(database DB, dataTable Table[db.DataSeries], textTable Table[db.TextSeries], config *Config) { diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index ccae51b3..d89b4ae9 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -47,7 +47,8 @@ func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error // // TODO: should sensor/level be NULL or 0 query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level FROM text_data - WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2)` + WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid` slog.Info("Querying text labels...") rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) From 23ea484fbaf8b579c2d4a08d5caadc7d23f2d2a8 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 09:54:24 +0100 
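With the change above, each series is dumped to <Path>/<database>/<table>/<stationid>/<stationid>_<typeid>_<paramid>_<sensor>_<level>.csv, with the per-table label cache one directory up. A minimal sketch of the resulting paths, assuming the default ./dumps path, the kvalobs database, the data table, and an arbitrary example label (station 18700, type 501, param 211, no sensor/level):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	base := filepath.Join("./dumps", "kvalobs") // <Path>/<database>
	// Per-table label cache, written next to the table directory
	labelFile := filepath.Join(base, "data_labels.csv")
	// One directory per station, one CSV per label (see Label.ToFilename)
	seriesFile := filepath.Join(base, "data", fmt.Sprint(18700),
		fmt.Sprintf("%v_%v_%v_%v_%v.csv", 18700, 501, 211, "", ""))

	fmt.Println(labelFile)  // dumps/kvalobs/data_labels.csv
	fmt.Println(seriesFile) // dumps/kvalobs/data/18700/18700_501_211__.csv
}

The double underscore in the file name comes from ToFilename leaving a nil sensor and level as empty strings.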
Subject: [PATCH 17/67] Remove log call --- migrations/kvalobs/dump/dump.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 27391350..588044ea 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -29,14 +29,13 @@ func writeLabels(path string, labels []*db.KvLabel) error { return nil } -func writeSeries[S db.DataSeries | db.TextSeries](series S, path, table string, label *db.KvLabel) error { +func writeSeries[S db.DataSeries | db.TextSeries](series S, path string, label *db.KvLabel) error { filename := filepath.Join(path, label.ToFilename()) file, err := os.Create(filename) if err != nil { return err } - slog.Info(fmt.Sprintf("Writing %s observations to '%s'", table, filename)) if err = gocsv.MarshalFile(series, file); err != nil { slog.Error(err.Error()) return err @@ -111,7 +110,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo return } - if err := writeSeries(series, stationPath, table.Name, label); err != nil { + if err := writeSeries(series, stationPath, label); err != nil { slog.Error(err.Error()) return } From d5cff05d0efe18ba7e43c0a8b5e0c94aa2cd9e69 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 10:23:54 +0100 Subject: [PATCH 18/67] Reset log output when returning from dumpTable --- migrations/kvalobs/dump/dump.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 588044ea..326e95f1 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -3,6 +3,7 @@ package dump import ( "context" "fmt" + "log" "log/slog" "os" "path/filepath" @@ -119,6 +120,8 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo }() } wg.Wait() + + log.SetOutput(os.Stdout) } func dumpDB(database DB, dataTable Table[db.DataSeries], textTable Table[db.TextSeries], config *Config) { From ff5650d26411caa140a7d906645346c50291e241 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 10:24:11 +0100 Subject: [PATCH 19/67] Change position of if check --- migrations/kvalobs/dump/dump.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 326e95f1..4627982c 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -83,19 +83,19 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo for _, label := range labels { bar.Add(1) + if !config.ShouldProcessLabel(label) { + continue + } + thisPath := filepath.Join(path, fmt.Sprint(label.StationID)) if thisPath != stationPath { stationPath = thisPath if err := os.MkdirAll(stationPath, os.ModePerm); err != nil { slog.Error(err.Error()) - return + continue } } - if !config.ShouldProcessLabel(label) { - continue - } - wg.Add(1) semaphore <- struct{}{} go func() { From cd4b1ac89a1ec205bfae2de864f18ebe59973eb2 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 10:24:19 +0100 Subject: [PATCH 20/67] Update help description --- migrations/kvalobs/dump/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 07204427..92c89207 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -39,9 +39,9 @@ type DB struct { type Config struct { db.BaseConfig[string] UpdateLabels bool 
`help:"Overwrites the label CSV files"` - Database string `arg:"--db" help:"Which database to dump from. Choices: ['kvalobs', 'histkvalobs']"` - Table string `help:"Which table to dump. Choices: ['data', 'text']"` - MaxConn int `arg:"-n" default:"4" help:"Max number of concurrent connections allowed to KDVH"` + Database string `arg:"--db" help:"Which database to dump from, all by default. Choices: ['kvalobs', 'histkvalobs']"` + Table string `help:"Which table to dump, all by default. Choices: ['data', 'text']"` + MaxConn int `arg:"-n" default:"4" help:"Max number of concurrent connections allowed to Kvalobs"` } type TimeSpan struct { From 1603e7757d50fd97f0c80aa2ee38470aeb6cc16c Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 11:10:52 +0100 Subject: [PATCH 21/67] Move kvalobs Table and use consts for envars --- migrations/kdvh/db/main.go | 3 ++ migrations/kdvh/dump/main.go | 4 +- migrations/kdvh/import/cache/kdvh.go | 2 +- migrations/kdvh/import/cache/main.go | 2 +- migrations/kdvh/import/main.go | 3 +- migrations/kvalobs/db/base_config.go | 28 +++++++--- migrations/kvalobs/db/main.go | 17 ++++-- migrations/kvalobs/dump/data.go | 5 +- migrations/kvalobs/dump/dump.go | 14 ++++- migrations/kvalobs/dump/main.go | 63 ++-------------------- migrations/kvalobs/dump/table.go | 22 ++++++++ migrations/kvalobs/dump/text.go | 5 +- migrations/kvalobs/import/main.go | 13 ++++- migrations/kvalobs/import/text.go | 4 +- migrations/lard/main.go | 2 + migrations/utils/{timestamp.go => time.go} | 7 ++- migrations/utils/utils.go | 13 +++-- 17 files changed, 120 insertions(+), 87 deletions(-) create mode 100644 migrations/kvalobs/dump/table.go rename migrations/utils/{timestamp.go => time.go} (84%) diff --git a/migrations/kdvh/db/main.go b/migrations/kdvh/db/main.go index 81141221..0c1967bb 100644 --- a/migrations/kdvh/db/main.go +++ b/migrations/kdvh/db/main.go @@ -1,5 +1,8 @@ package db +const KDVH_ENV_VAR string = "KDVH_PROXY_CONN" +const STINFO_ENV_VAR string = "STINFO_STRING" + // Map of all tables found in KDVH, with set max import year type KDVH struct { Tables map[string]*Table diff --git a/migrations/kdvh/dump/main.go b/migrations/kdvh/dump/main.go index e31c9e9d..51d4541a 100644 --- a/migrations/kdvh/dump/main.go +++ b/migrations/kdvh/dump/main.go @@ -18,11 +18,11 @@ type Config struct { Stations []string `arg:"-s" help:"Optional space separated list of stations IDs"` Elements []string `arg:"-e" help:"Optional space separated list of element codes"` Overwrite bool `help:"Overwrite any existing dumped files"` - MaxConn int `arg:"-n" default:"4" help:"Max number of concurrent connections allowed to KDVH"` + MaxConn int `arg:"-n" default:"4" help:"Max number of allowed concurrent connections to KDVH"` } func (config *Config) Execute() { - pool, err := pgxpool.New(context.Background(), os.Getenv("KDVH_PROXY_CONN")) + pool, err := pgxpool.New(context.Background(), os.Getenv(db.KDVH_ENV_VAR)) if err != nil { slog.Error(err.Error()) return diff --git a/migrations/kdvh/import/cache/kdvh.go b/migrations/kdvh/import/cache/kdvh.go index 0ca938cd..7b896755 100644 --- a/migrations/kdvh/import/cache/kdvh.go +++ b/migrations/kdvh/import/cache/kdvh.go @@ -39,7 +39,7 @@ func cacheKDVH(tables, stations, elements []string, kdvh *db.KDVH) KDVHMap { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - conn, err := pgx.Connect(ctx, os.Getenv("KDVH_PROXY_CONN")) + conn, err := pgx.Connect(ctx, os.Getenv(db.KDVH_ENV_VAR)) if err != nil { slog.Error("Could not connect 
to KDVH proxy. Make sure to be connected to the VPN: " + err.Error()) os.Exit(1) diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index 2183e27b..e69a9957 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -31,7 +31,7 @@ func CacheMetadata(tables, stations, elements []string, kdvh *db.KDVH) *Cache { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - conn, err := pgx.Connect(ctx, os.Getenv("STINFO_STRING")) + conn, err := pgx.Connect(ctx, os.Getenv(db.STINFO_ENV_VAR)) if err != nil { slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. " + err.Error()) os.Exit(1) diff --git a/migrations/kdvh/import/main.go b/migrations/kdvh/import/main.go index 83daef8c..941ffcb1 100644 --- a/migrations/kdvh/import/main.go +++ b/migrations/kdvh/import/main.go @@ -12,6 +12,7 @@ import ( "migrate/kdvh/db" "migrate/kdvh/import/cache" + "migrate/lard" "migrate/utils" ) @@ -42,7 +43,7 @@ func (config *Config) Execute() { cache := cache.CacheMetadata(config.Tables, config.Stations, config.Elements, kdvh) // Create connection pool for LARD - pool, err := pgxpool.New(context.TODO(), os.Getenv("LARD_STRING")) + pool, err := pgxpool.New(context.TODO(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { slog.Error(fmt.Sprint("Could not connect to Lard:", err)) return diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go index 37ff5537..3473eb13 100644 --- a/migrations/kvalobs/db/base_config.go +++ b/migrations/kvalobs/db/base_config.go @@ -14,6 +14,8 @@ type BaseConfig[T int32 | string] struct { Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` + Database string `arg:"--db" help:"Which database to process, all by default. Choices: ['kvalobs', 'histkvalobs']"` + Table string `help:"Which table to process, all by default. Choices: ['data', 'text']"` Stations []int32 `help:"Optional space separated list of station numbers"` TypeIds []int32 `help:"Optional space separated list of type IDs"` ParamIds []int32 `help:"Optional space separated list of param IDs"` @@ -23,10 +25,24 @@ type BaseConfig[T int32 | string] struct { func (config *BaseConfig[T]) ShouldProcessLabel(label *Label[T]) bool { // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || - return utils.Contains(config.Stations, label.StationID) && - utils.Contains(config.TypeIds, label.TypeID) && - utils.Contains(config.ParamIds, label.ParamID) && - // TODO: these two should never be null anyway - utils.NullableContains(config.Sensors, label.Sensor) && - utils.NullableContains(config.Levels, label.Level) + return utils.IsEmptyOrContains(config.Stations, label.StationID) && + utils.IsEmptyOrContains(config.TypeIds, label.TypeID) && + utils.IsEmptyOrContains(config.ParamIds, label.ParamID) && + // TODO: these two should never be null anyway? 
+ utils.IsEmptyOrContainsPtr(config.Sensors, label.Sensor) && + utils.IsEmptyOrContainsPtr(config.Levels, label.Level) +} + +func (config *BaseConfig[T]) TimeSpan() *utils.TimeSpan { + return &utils.TimeSpan{From: config.FromTime.Inner(), To: config.ToTime.Inner()} +} + +// Check if the `--db` flag was passed in +func (config *BaseConfig[T]) ChosenDB(name string) bool { + return config.Database == "" || config.Database == name +} + +// Check if the `--table` flag was passed in +func (config *BaseConfig[T]) ChosenTable(name string) bool { + return config.Table == "" || config.Table == name } diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 1410f0be..bc9909c8 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -5,7 +5,7 @@ import ( ) // Kvalobs is composed of two databases -// 1) `kvalobs` for fresh data +// 1) `kvalobs` for fresh data? // 2) `histkvalobs` for data older than // // Both contain the same tables: @@ -85,11 +85,14 @@ import ( // - Kvalobs doesn't have the concept of timeseries ID, // instead there is a sequential ID associated with each observation row +const DATA_TABLE_NAME string = "data" +const TEXT_TABLE_NAME string = "text" + var NULL_VALUES []float64 = []float64{-34767, -34766} type DataSeries = []*DataObs -// Kvalobs data observation row +// Kvalobs data table observation row type DataObs struct { Obstime time.Time `db:"obstime"` Original float64 `db:"original"` @@ -102,14 +105,20 @@ type DataObs struct { type TextSeries = []*TextObs -// Kvalobs text observation row +// Kvalobs text_data table observation row type TextObs struct { Obstime time.Time `db:"obstime"` Original string `db:"original"` Tbtime time.Time `db:"tbtime"` } -type Kvalobs struct { +type DB struct { Name string ConnEnvVar string } + +func InitDBs() (DB, DB) { + kvalobs := DB{Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING"} + histkvalobs := DB{Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING"} + return kvalobs, histkvalobs +} diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index 00df07e6..d05a21cb 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -8,9 +8,10 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "migrate/kvalobs/db" + "migrate/utils" ) -func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { +func getDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { // TODO: not sure about the sensor/level conditions, // they should never be NULL since they have default values different from NULL? 
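// (Editor's note, not part of the patch: `sensor` is a bpchar(1) column in the `data` table,
// as noted on db.Label, which is why the query below casts it with sensor::int.)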
// TODO: We probably don't even need the join, @@ -42,7 +43,7 @@ func getDataLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error return labels, nil } -func getDataSeries(label *db.KvLabel, timespan *TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { +func getDataSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { // TODO: is the case useful here, we can just check for cfailed = '' in here // query := `SELECT // obstime, diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 4627982c..715b5416 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -124,7 +124,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo log.SetOutput(os.Stdout) } -func dumpDB(database DB, dataTable Table[db.DataSeries], textTable Table[db.TextSeries], config *Config) { +func dumpDB(database db.DB, config *Config) { pool, err := pgxpool.New(context.Background(), os.Getenv(database.ConnEnvVar)) if err != nil { slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) @@ -138,6 +138,18 @@ func dumpDB(database DB, dataTable Table[db.DataSeries], textTable Table[db.Text return } + dataTable := Table[db.DataSeries]{ + Name: db.DATA_TABLE_NAME, + LabelFn: getDataLabels, + ObsFn: getDataSeries, + } + + textTable := Table[db.TextSeries]{ + Name: db.TEXT_TABLE_NAME, + LabelFn: getTextLabels, + ObsFn: getTextSeries, + } + if config.ChosenTable(dataTable.Name) { dumpTable(path, dataTable, pool, config) } diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 92c89207..794af68a 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -1,10 +1,6 @@ package dump import ( - "time" - - "github.com/jackc/pgx/v5/pgxpool" - "migrate/kvalobs/db" ) @@ -19,69 +15,20 @@ import ( // TODO: not sure what to do with this one // func joinTS(first, second []lard.Label) -type Table[S db.DataSeries | db.TextSeries] struct { - Name string - LabelFn LabelFunc - ObsFn ObsFunc[S] -} - -// Function used to query labels from kvalobs given an optional timespan -type LabelFunc func(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) - -// Function used to query timeseries from kvalobs for a specific label -type ObsFunc[S db.DataSeries | db.TextSeries] func(label *db.KvLabel, timespan *TimeSpan, pool *pgxpool.Pool) (S, error) - -type DB struct { - Name string - ConnEnvVar string -} - type Config struct { db.BaseConfig[string] - UpdateLabels bool `help:"Overwrites the label CSV files"` - Database string `arg:"--db" help:"Which database to dump from, all by default. Choices: ['kvalobs', 'histkvalobs']"` - Table string `help:"Which table to dump, all by default. 
Choices: ['data', 'text']"` - MaxConn int `arg:"-n" default:"4" help:"Max number of concurrent connections allowed to Kvalobs"` -} - -type TimeSpan struct { - From *time.Time - To *time.Time -} - -func (config *Config) TimeSpan() *TimeSpan { - return &TimeSpan{From: config.FromTime.Inner(), To: config.ToTime.Inner()} -} - -func (config *Config) ChosenDB(name string) bool { - return config.Database == "" || config.Database == name -} - -func (config *Config) ChosenTable(name string) bool { - return config.Table == "" || config.Table == name + UpdateLabels bool `help:"Overwrites the label CSV files"` + MaxConn int `arg:"-n" default:"4" help:"Max number of allowed concurrent connections to Kvalobs"` } func (config *Config) Execute() { - kvalobs := DB{Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING"} - histkvalobs := DB{Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING"} - - dataTable := Table[db.DataSeries]{ - Name: "data", - LabelFn: getDataLabels, - ObsFn: getDataSeries, - } - - textTable := Table[db.TextSeries]{ - Name: "text", - LabelFn: getTextLabels, - ObsFn: getTextSeries, - } + kvalobs, histkvalobs := db.InitDBs() if config.ChosenDB(kvalobs.Name) { - dumpDB(kvalobs, dataTable, textTable, config) + dumpDB(kvalobs, config) } if config.ChosenDB(histkvalobs.Name) { - dumpDB(histkvalobs, dataTable, textTable, config) + dumpDB(histkvalobs, config) } } diff --git a/migrations/kvalobs/dump/table.go b/migrations/kvalobs/dump/table.go new file mode 100644 index 00000000..b295cb4a --- /dev/null +++ b/migrations/kvalobs/dump/table.go @@ -0,0 +1,22 @@ +package dump + +import ( + "migrate/kvalobs/db" + "migrate/utils" + + "github.com/jackc/pgx/v5/pgxpool" +) + +// Maps to `data` and `text_data` tables in Kvalobs +type Table[S db.DataSeries | db.TextSeries] struct { + Name string // Name of the table + LabelFn LabelDumpFunc // Function that dumps labels from the table + ObsFn ObsDumpFunc[S] // Function that dumps observations from the table + ImportFn func() +} + +// Function used to query labels from kvalobs given an optional timespan +type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) + +// Function used to query timeseries from kvalobs for a specific label +type ObsDumpFunc[S db.DataSeries | db.TextSeries] func(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (S, error) diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index d89b4ae9..05d6b0cc 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -8,9 +8,10 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "migrate/kvalobs/db" + "migrate/utils" ) -func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { +func getTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { // OGquery := `SELECT DISTINCT // stationid, // typeid, @@ -66,7 +67,7 @@ func getTextLabels(timespan *TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error return labels, nil } -func getTextSeries(label *db.KvLabel, timespan *TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { +func getTextSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { // query := ` // SELECT // obstime, diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index 1e5c8557..3ad4beca 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -10,6 +10,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "migrate/kvalobs/db" + 
"migrate/lard" ) type Config struct { @@ -18,11 +19,21 @@ type Config struct { } func (config *Config) Execute() error { - pool, err := pgxpool.New(context.Background(), os.Getenv("KVALOBS_CONN_STRING")) + pool, err := pgxpool.New(context.Background(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) } defer pool.Close() + kvalobs, histkvalobs := db.InitDBs() + + if config.ChosenDB(kvalobs.Name) { + // dumpDB(kvalobs, dataTable, textTable, config) + } + + if config.ChosenDB(histkvalobs.Name) { + // dumpDB(histkvalobs, dataTable, textTable, config) + } + return nil } diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go index a56d51e5..e3d2e3a1 100644 --- a/migrations/kvalobs/import/text.go +++ b/migrations/kvalobs/import/text.go @@ -29,7 +29,7 @@ func (config *Config) ImportText(pool *pgxpool.Pool, path string) error { continue } - // FIXME: FromTime can be nil + // FIXME: FromTime can be nil and anyway config.FromTime is wrong here! lardLabel := lard.Label(*label) tsid, err := lard.GetTimeseriesID(&lardLabel, config.FromTime.Inner(), pool) if err != nil { @@ -37,7 +37,7 @@ func (config *Config) ImportText(pool *pgxpool.Pool, path string) error { continue } - if !utils.Contains(config.Ts, tsid) { + if !utils.IsEmptyOrContains(config.Ts, tsid) { continue } diff --git a/migrations/lard/main.go b/migrations/lard/main.go index be99c2da..c9477af9 100644 --- a/migrations/lard/main.go +++ b/migrations/lard/main.go @@ -2,6 +2,8 @@ package lard import "time" +const LARD_ENV_VAR string = "LARD_STRING" + // Struct mimicking the `public.data` table type DataObs struct { // Timeseries ID diff --git a/migrations/utils/timestamp.go b/migrations/utils/time.go similarity index 84% rename from migrations/utils/timestamp.go rename to migrations/utils/time.go index 195e5550..7efa82e0 100644 --- a/migrations/utils/timestamp.go +++ b/migrations/utils/time.go @@ -12,7 +12,7 @@ type Timestamp struct { func (ts *Timestamp) UnmarshalText(b []byte) error { t, err := time.Parse(time.DateOnly, string(b)) if err != nil { - return fmt.Errorf("Only the date-only format (\"YYYY-MM-DD\") is allowed. Got \"%s\"", b) + return fmt.Errorf("Only the date-only format (\"YYYY-MM-DD\") is allowed. 
Got %s", b) } ts.t = t return nil @@ -29,3 +29,8 @@ func (ts *Timestamp) Inner() *time.Time { return &ts.t } + +type TimeSpan struct { + From *time.Time + To *time.Time +} diff --git a/migrations/utils/utils.go b/migrations/utils/utils.go index 3e155162..7035e253 100644 --- a/migrations/utils/utils.go +++ b/migrations/utils/utils.go @@ -104,22 +104,25 @@ func TryMap[T, V any](ts []T, fn func(T) (V, error)) ([]V, error) { return result, nil } -// Same as slices.Contains but return `true` if the slice is nil, -// meaning that upstream the slice is optional -func Contains[T comparable](s []T, v T) bool { +// Returns `true` if the slice is nil, otherwise checks if the element is +// contained in the slice +func IsEmptyOrContains[T comparable](s []T, v T) bool { if s == nil { return true } return slices.Contains(s, v) } -func NullableContains[T comparable](s []T, v *T) bool { +// Returns `true` if the slice is nil, +// `false` if the element pointer is nil, +// otherwise checks if the element is contained in the slice +func IsEmptyOrContainsPtr[T comparable](s []T, v *T) bool { if s == nil { return true } if v == nil { - // Non-nil slice does not contain nil + // Nil value is definitely not contained in non-nil slice return false } From 9e7349bc6aa94cbd287da4684f9d9feb0838545e Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 28 Nov 2024 17:01:22 +0100 Subject: [PATCH 22/67] Kvalobs import WIP --- db/flags.sql | 4 +- migrations/kdvh_test.go | 78 +++++++++++++++ migrations/kvalobs/db/csv.go | 147 +++++++--------------------- migrations/kvalobs/db/labels.go | 3 +- migrations/kvalobs/db/table.go | 32 ++++++ migrations/kvalobs/dump/data.go | 14 ++- migrations/kvalobs/dump/dump.go | 85 ++++++++-------- migrations/kvalobs/dump/table.go | 22 ----- migrations/kvalobs/dump/text.go | 14 ++- migrations/kvalobs/import/data.go | 91 ++++++++++++++++- migrations/kvalobs/import/import.go | 121 +++++++++++++++++++++++ migrations/kvalobs/import/main.go | 7 +- migrations/kvalobs/import/text.go | 66 ++++++------- migrations/kvalobs/kvalobs_test.go | 1 - migrations/kvalobs_test.go | 119 ++++++++++++++++++++++ migrations/lard/main.go | 2 +- migrations/lard/permissions.go | 129 ++++++++++++++++++++++++ migrations/lard/timeseries.go | 2 +- 18 files changed, 710 insertions(+), 227 deletions(-) create mode 100644 migrations/kdvh_test.go create mode 100644 migrations/kvalobs/db/table.go delete mode 100644 migrations/kvalobs/dump/table.go create mode 100644 migrations/kvalobs/import/import.go delete mode 100644 migrations/kvalobs/kvalobs_test.go create mode 100644 migrations/kvalobs_test.go create mode 100644 migrations/lard/permissions.go diff --git a/db/flags.sql b/db/flags.sql index d3d5de2d..432e54b2 100644 --- a/db/flags.sql +++ b/db/flags.sql @@ -7,7 +7,7 @@ CREATE TABLE IF NOT EXISTS flags.kvdata ( corrected REAL NULL, controlinfo TEXT NULL, useinfo TEXT NULL, - cfailed INT4 NULL, + cfailed TEXT NULL, CONSTRAINT unique_kvdata_timeseries_obstime UNIQUE (timeseries, obstime) ); CREATE INDEX IF NOT EXISTS kvdata_obtime_index ON flags.kvdata (obstime); @@ -19,7 +19,7 @@ CREATE TABLE IF NOT EXISTS flags.old_databases ( corrected REAL NULL, controlinfo TEXT NULL, useinfo TEXT NULL, - cfailed INT4 NULL , + cfailed TEXT NULL , CONSTRAINT unique_old_flags_timeseries_obstime UNIQUE (timeseries, obstime) ); CREATE INDEX IF NOT EXISTS old_flags_obtime_index ON flags.old_databases (obstime); diff --git a/migrations/kdvh_test.go b/migrations/kdvh_test.go new file mode 100644 index 00000000..16247b26 --- /dev/null +++ 
b/migrations/kdvh_test.go @@ -0,0 +1,78 @@ +package main + +import ( + "context" + "fmt" + "log" + "testing" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/kdvh/db" + port "migrate/kdvh/import" + "migrate/kdvh/import/cache" +) + +type KdvhTestCase struct { + table string + station int32 + elem string + permit int32 + expectedRows int64 +} + +func (t *KdvhTestCase) mockConfig() (*port.Config, *cache.Cache) { + return &port.Config{ + Tables: []string{t.table}, + Stations: []string{fmt.Sprint(t.station)}, + Elements: []string{t.elem}, + Path: "./files", + HasHeader: true, + Sep: ";", + }, + &cache.Cache{ + Stinfo: cache.StinfoMap{ + {ElemCode: t.elem, TableName: t.table}: { + Fromtime: time.Date(2001, 7, 1, 9, 0, 0, 0, time.UTC), + IsScalar: true, + }, + }, + StationPermits: cache.StationPermitMap{ + t.station: t.permit, + }, + } +} + +func TestImportKDVH(t *testing.T) { + log.SetFlags(log.LstdFlags | log.Lshortfile) + + pool, err := pgxpool.New(context.TODO(), LARD_STRING) + if err != nil { + t.Log("Could not connect to Lard:", err) + } + defer pool.Close() + + testCases := []KdvhTestCase{ + {table: "T_MDATA", station: 12345, elem: "TA", permit: 0, expectedRows: 0}, // restricted TS + {table: "T_MDATA", station: 12345, elem: "TA", permit: 1, expectedRows: 2644}, // open TS + } + + kdvh := db.Init() + + // TODO: test does not fail, if flags are not inserted + // TODO: bar does not work well with log print outs + for _, c := range testCases { + config, cache := c.mockConfig() + + table, ok := kdvh.Tables[c.table] + if !ok { + t.Fatal("Table does not exist in database") + } + + insertedRows := port.ImportTable(table, cache, pool, config) + if insertedRows != c.expectedRows { + t.Fail() + } + } +} diff --git a/migrations/kvalobs/db/csv.go b/migrations/kvalobs/db/csv.go index 3ef72c1e..177ea100 100644 --- a/migrations/kvalobs/db/csv.go +++ b/migrations/kvalobs/db/csv.go @@ -1,116 +1,35 @@ package db -import ( - "bufio" - "log/slog" - "migrate/lard" - "os" - - "github.com/gocarina/gocsv" -) - -func ReadLabelCSV(filename string) (labels []*KvLabel, err error) { - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - // TODO: maybe I should preallocate slice size if I can? - err = gocsv.UnmarshalFile(file, &labels) - return labels, err -} - -func ReadDataCSV(tsid int32, filename string) ([][]any, error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - defer file.Close() - - reader := bufio.NewScanner(file) - - // TODO: maybe I should preallocate slice size if I can? - var data [][]any - for reader.Scan() { - var obs lard.DataObs - - err = gocsv.UnmarshalString(reader.Text(), &obs) - if err != nil { - return nil, err - } - - // Kvalobs does not have IDs so we have to add it here - obs.Id = tsid - - row := obs.ToRow() - data = append(data, row) - } - - return data, nil -} - -func ReadTextCSV(tsid int32, filename string) ([][]any, error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - defer file.Close() - - reader := bufio.NewScanner(file) - - // TODO: maybe I should preallocate slice size if I can? 
- var data [][]any - for reader.Scan() { - var obs lard.TextObs - - err = gocsv.UnmarshalString(reader.Text(), &obs) - if err != nil { - return nil, err - } - - // Kvalobs does not have IDs so we have to add it here - obs.Id = tsid - - row := obs.ToRow() - data = append(data, row) - } - - return data, nil -} - -type Rower interface { - ToRow() []any -} - -func ReadSeriesCSV[T Rower](tsid int32, filename string) ([][]any, error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - defer file.Close() - - reader := bufio.NewScanner(file) - - // TODO: maybe I should preallocate slice size if I can? - var data [][]any - for reader.Scan() { - var obs T - - err = gocsv.UnmarshalString(reader.Text(), &obs) - if err != nil { - return nil, err - } - - // Kvalobs does not have IDs so we have to add it here - // obs.Id = tsid - - row := obs.ToRow() - data = append(data, row) - } - - return data, nil -} +// type Rower interface { +// ToRow() []any +// } +// +// func ReadSeriesCSV[T Rower](tsid int32, filename string) ([][]any, error) { +// file, err := os.Open(filename) +// if err != nil { +// slog.Error(err.Error()) +// return nil, err +// } +// defer file.Close() +// +// reader := bufio.NewScanner(file) +// +// // TODO: maybe I should preallocate slice size if I can? +// var data [][]any +// for reader.Scan() { +// var obs T +// +// err = gocsv.UnmarshalString(reader.Text(), &obs) +// if err != nil { +// return nil, err +// } +// +// // Kvalobs does not have IDs so we have to add it here +// // obs.Id = tsid +// +// row := obs.ToRow() +// data = append(data, row) +// } +// +// return data, nil +// } diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go index 11189cd5..fff3818b 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -20,6 +20,8 @@ type Label[T int32 | string] struct { // Can be directly casted to lard.Label type LardLabel = Label[int32] + +// Kvalobs specific type KvLabel = Label[string] func (l *Label[T]) sensorLevelString() (string, string) { @@ -51,7 +53,6 @@ func (l *Label[T]) ToString() string { } func parseFilenameFields(s *string) (*int32, error) { - // TODO: probably there is a better way to do this without defining a gazillion functions if *s == "" { return nil, nil } diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go new file mode 100644 index 00000000..f6bdbfcb --- /dev/null +++ b/migrations/kvalobs/db/table.go @@ -0,0 +1,32 @@ +package db + +import ( + "migrate/utils" + + "github.com/jackc/pgx/v5/pgxpool" +) + +// Maps to `data` and `text_data` tables in Kvalobs +type Table[S DataSeries | TextSeries] struct { + // Name string // Name of the table + Path string // Path of the dumped table + DumpLabels LabelDumpFunc // Function that dumps labels from the table + DumpSeries ObsDumpFunc[S] // Function that dumps observations from the table + Import ImportFunc // Function that ingests observations into LARD + ReadCSV ReadCSVFunc // Function that reads dumped CSV files +} + +type DataTable = Table[DataSeries] +type TextTable = Table[TextSeries] + +// Function used to query labels from kvalobs given an optional timespan +type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*KvLabel, error) + +// Function used to query timeseries from kvalobs for a specific label +type ObsDumpFunc[S DataSeries | TextSeries] func(label *KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (S, error) + +// Lard Import function +type ImportFunc 
func(ts [][]any, pool *pgxpool.Pool, logStr string) (int64, error) + +// How to read dumped CSV, returns one array for observations and one for flags +type ReadCSVFunc func(tsid int32, filename string) ([][]any, [][]any, error) diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index d05a21cb..240ba9cc 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -3,6 +3,7 @@ package dump import ( "context" "log/slog" + "path/filepath" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" @@ -11,7 +12,16 @@ import ( "migrate/utils" ) -func getDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { +// Returns a DataTable for dump +func DataTable(path string) db.DataTable { + return db.DataTable{ + Path: filepath.Join(path, db.DATA_TABLE_NAME), + DumpLabels: dumpDataLabels, + DumpSeries: dumpDataSeries, + } +} + +func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { // TODO: not sure about the sensor/level conditions, // they should never be NULL since they have default values different from NULL? // TODO: We probably don't even need the join, @@ -43,7 +53,7 @@ func getDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, return labels, nil } -func getDataSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { +func dumpDataSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { // TODO: is the case useful here, we can just check for cfailed = '' in here // query := `SELECT // obstime, diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 715b5416..184bdf31 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -16,7 +16,19 @@ import ( "migrate/utils" ) -func writeLabels(path string, labels []*db.KvLabel) error { +func readLabelCSV(filename string) (labels []*db.KvLabel, err error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // TODO: maybe I should preallocate slice size if I can? 
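// (Editor's annotation: readLabelCSV reads back the label cache written by writeLabelCSV below,
// so getLabels only re-queries Kvalobs when the CSV file is missing or UpdateLabels is set.
// One way to address the TODO above, assuming a row-count header line were added to the file,
// would be `labels = make([]*db.KvLabel, 0, rowCount)` before unmarshalling; rowCount is
// hypothetical here.)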
+ err = gocsv.UnmarshalFile(file, &labels) + return labels, err +} + +func writeLabelCSV(path string, labels []*db.KvLabel) error { file, err := os.Create(path) if err != nil { return err @@ -30,7 +42,8 @@ func writeLabels(path string, labels []*db.KvLabel) error { return nil } -func writeSeries[S db.DataSeries | db.TextSeries](series S, path string, label *db.KvLabel) error { +// TODO: add number of rows as header row +func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, label *db.KvLabel) error { filename := filepath.Join(path, label.ToFilename()) file, err := os.Create(filename) if err != nil { @@ -45,35 +58,37 @@ func writeSeries[S db.DataSeries | db.TextSeries](series S, path string, label * return nil } -func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], pool *pgxpool.Pool, config *Config) { - var labels []*db.KvLabel +func getLabels[T db.DataSeries | db.TextSeries](table db.Table[T], pool *pgxpool.Pool, config *Config) (labels []*db.KvLabel, err error) { + labelFile := table.Path + "_labels.csv" - timespan := config.TimeSpan() - - labelFile := filepath.Join(path, table.Name+"_labels.csv") if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { - labels, err = table.LabelFn(timespan, pool) + labels, err = table.DumpLabels(config.TimeSpan(), pool) if err != nil { - slog.Error(err.Error()) - return - } - if err = writeLabels(labelFile, labels); err != nil { - slog.Error(err.Error()) - return - } - } else { - if labels, err = db.ReadLabelCSV(labelFile); err != nil { - slog.Error(err.Error()) - return + return nil, err } + + err = writeLabelCSV(labelFile, labels) + return labels, err } - path = filepath.Join(path, table.Name) - utils.SetLogFile(path, "dump") + return readLabelCSV(labelFile) +} + +func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { + var labels []*db.KvLabel + + labels, err := getLabels(table, pool, config) + if err != nil { + slog.Error(err.Error()) + return + } + + timespan := config.TimeSpan() + utils.SetLogFile(table.Path, "dump") // TODO: this bar is a bit deceiving if you don't dump all the labels // Maybe should only cache the ones requested from cli? 
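// (Editor's sketch, not part of the patch: one way to make the bar track only what is
// actually dumped would be to filter the cached labels first, e.g.
//   dumpable := make([]*db.KvLabel, 0, len(labels))
//   for _, label := range labels {
//       if config.ShouldProcessLabel(label) {
//           dumpable = append(dumpable, label)
//       }
//   }
//   bar := utils.NewBar(len(dumpable), table.Path)
// and then range over dumpable below.)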
- bar := utils.NewBar(len(labels), path) + bar := utils.NewBar(len(labels), table.Path) // Used to limit connections to the database semaphore := make(chan struct{}, config.MaxConn) @@ -87,7 +102,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo continue } - thisPath := filepath.Join(path, fmt.Sprint(label.StationID)) + thisPath := filepath.Join(table.Path, fmt.Sprint(label.StationID)) if thisPath != stationPath { stationPath = thisPath if err := os.MkdirAll(stationPath, os.ModePerm); err != nil { @@ -105,13 +120,13 @@ func dumpTable[S db.DataSeries | db.TextSeries](path string, table Table[S], poo <-semaphore }() - series, err := table.ObsFn(label, timespan, pool) + series, err := table.DumpSeries(label, timespan, pool) if err != nil { slog.Error(err.Error()) return } - if err := writeSeries(series, stationPath, label); err != nil { + if err := writeSeriesCSV(series, stationPath, label); err != nil { slog.Error(err.Error()) return } @@ -138,23 +153,11 @@ func dumpDB(database db.DB, config *Config) { return } - dataTable := Table[db.DataSeries]{ - Name: db.DATA_TABLE_NAME, - LabelFn: getDataLabels, - ObsFn: getDataSeries, - } - - textTable := Table[db.TextSeries]{ - Name: db.TEXT_TABLE_NAME, - LabelFn: getTextLabels, - ObsFn: getTextSeries, - } - - if config.ChosenTable(dataTable.Name) { - dumpTable(path, dataTable, pool, config) + if config.ChosenTable(db.DATA_TABLE_NAME) { + dumpTable(DataTable(path), pool, config) } - if config.ChosenTable(textTable.Name) { - dumpTable(path, textTable, pool, config) + if config.ChosenTable(db.TEXT_TABLE_NAME) { + dumpTable(TextTable(path), pool, config) } } diff --git a/migrations/kvalobs/dump/table.go b/migrations/kvalobs/dump/table.go deleted file mode 100644 index b295cb4a..00000000 --- a/migrations/kvalobs/dump/table.go +++ /dev/null @@ -1,22 +0,0 @@ -package dump - -import ( - "migrate/kvalobs/db" - "migrate/utils" - - "github.com/jackc/pgx/v5/pgxpool" -) - -// Maps to `data` and `text_data` tables in Kvalobs -type Table[S db.DataSeries | db.TextSeries] struct { - Name string // Name of the table - LabelFn LabelDumpFunc // Function that dumps labels from the table - ObsFn ObsDumpFunc[S] // Function that dumps observations from the table - ImportFn func() -} - -// Function used to query labels from kvalobs given an optional timespan -type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) - -// Function used to query timeseries from kvalobs for a specific label -type ObsDumpFunc[S db.DataSeries | db.TextSeries] func(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (S, error) diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 05d6b0cc..74337c1d 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -3,6 +3,7 @@ package dump import ( "context" "log/slog" + "path/filepath" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" @@ -11,7 +12,16 @@ import ( "migrate/utils" ) -func getTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { +// Returns a TextTable for dump +func TextTable(path string) db.TextTable { + return db.TextTable{ + Path: filepath.Join(path, db.TEXT_TABLE_NAME), + DumpLabels: dumpTextLabels, + DumpSeries: dumpTextSeries, + } +} + +func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { // OGquery := `SELECT DISTINCT // stationid, // typeid, @@ -67,7 +77,7 @@ func getTextLabels(timespan *utils.TimeSpan, pool 
*pgxpool.Pool) ([]*db.KvLabel, return labels, nil } -func getTextSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { +func dumpTextSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { // query := ` // SELECT // obstime, diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go index 7add78c1..f9353410 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/import/data.go @@ -1,8 +1,97 @@ package port -import "migrate/lard" +import ( + "bufio" + "log/slog" + "migrate/kvalobs/db" + "migrate/lard" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gocarina/gocsv" +) func readDataFiles() []lard.Label { // TODO: return nil } + +// Returns a DataTable for import +func DataTable(path string) db.DataTable { + return db.DataTable{ + Path: filepath.Join(path, db.DATA_TABLE_NAME), + Import: lard.InsertData, + ReadCSV: ReadDataCSV, + } +} + +func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(err.Error()) + return nil, nil, err + } + defer file.Close() + + reader := bufio.NewScanner(file) + + // TODO: maybe I should preallocate slice size if I can? + // Parse header + // reader.Scan() + // rowCount, _ = strconv.Atoi(scanner.Text()) + // data := make([][]any, 0, rowCount) + // flags := make([][]any, 0, rowCount) + var data [][]any + var flags [][]any + + for reader.Scan() { + // obstime, original, tbtime, corrected, Controlinfo, Useinfo, Cfailed + fields := strings.Split(reader.Text(), ",") + + obstime, err := time.Parse(time.RFC3339Nano, fields[0]) + if err != nil { + return nil, nil, err + } + + obsvalue64, err := strconv.ParseFloat(fields[1], 32) + if err != nil { + return nil, nil, err + } + + corrected64, err := strconv.ParseFloat(fields[1], 32) + if err != nil { + return nil, nil, err + } + + obsvalue := float32(obsvalue64) + corrected := float32(corrected64) + + lardObs := lard.DataObs{ + Id: tsid, + Obstime: obstime, + Data: &obsvalue, + } + + var cfailed *string = nil + if fields[6] != "" { + cfailed = &fields[6] + } + + flag := lard.Flag{ + Id: tsid, + Obstime: obstime, + Corrected: &corrected, + Controlinfo: &fields[4], + Useinfo: &fields[5], + Cfailed: cfailed, + } + + data = append(data, lardObs.ToRow()) + flags = append(flags, flag.ToRow()) + } + + return data, flags, nil +} diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go new file mode 100644 index 00000000..838510f9 --- /dev/null +++ b/migrations/kvalobs/import/import.go @@ -0,0 +1,121 @@ +package port + +import ( + "fmt" + "log" + "log/slog" + "migrate/kvalobs/db" + "migrate/lard" + "migrate/utils" + "os" + "path/filepath" + "sync" + "time" + + "github.com/jackc/pgx/v5/pgxpool" +) + +func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *lard.PermitMaps, pool *pgxpool.Pool, config *Config) (int64, error) { + stations, err := os.ReadDir(table.Path) + if err != nil { + slog.Error(err.Error()) + return 0, err + } + fmt.Println(stations) + + var rowsInserted int64 + for _, station := range stations { + stationDir := filepath.Join(table.Path, station.Name()) + labels, err := os.ReadDir(stationDir) + if err != nil { + slog.Warn(err.Error()) + continue + } + + var wg sync.WaitGroup + + bar := utils.NewBar(len(labels), station.Name()) + for _, file := range labels { + bar.Add(1) + + label, err := db.LabelFromFilename(file.Name()) + if err != nil { + 
slog.Error(err.Error()) + continue + } + + if !config.ShouldProcessLabel(label) { + continue + } + + labelStr := label.ToString() + + // Check if data for this station/element is restricted + if !permits.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) { + // TODO: eventually use this to choose which table to use on insert + slog.Warn(labelStr + "Timeseries data is restricted") + continue + } + + wg.Add(1) + go func() { + defer wg.Done() + + // FIXME: FromTime can be nil and anyway config.FromTime is wrong here! + lardLabel := lard.Label(*label) + // tsid, err := lard.GetTimeseriesID(&lardLabel, config.FromTime.Inner(), pool) + tsid, err := lard.GetTimeseriesID(&lardLabel, time.Now(), pool) + if err != nil { + slog.Error(err.Error()) + return + } + + ts, flags, err := table.ReadCSV(tsid, filepath.Join(stationDir, file.Name())) + if err != nil { + slog.Error(err.Error()) + return + } + + count, err := table.Import(ts, pool, labelStr) + if err != nil { + slog.Error("Failed bulk insertion: " + err.Error()) + return + } + + if err := lard.InsertFlags(flags, pool, labelStr); err != nil { + slog.Error(labelStr + "failed flag bulk insertion - " + err.Error()) + } + + rowsInserted += count + }() + } + wg.Wait() + } + + outputStr := fmt.Sprintf("%v: %v total rows inserted", table.Path, rowsInserted) + slog.Info(outputStr) + fmt.Println(outputStr) + + log.SetOutput(os.Stdout) + return rowsInserted, nil +} + +// TODO: here we trust that kvalobs and stinfosys have the same +// non scalar parameters, which might not be the case +func ImportDB(database db.DB, permits *lard.PermitMaps, pool *pgxpool.Pool, config *Config) { + path := filepath.Join(config.Path, database.Name) + + if config.ChosenTable(db.DATA_TABLE_NAME) { + table := DataTable(path) + utils.SetLogFile(table.Path, "import") + + ImportTable(table, permits, pool, config) + } + + if config.ChosenTable(db.TEXT_TABLE_NAME) { + table := TextTable(path) + utils.SetLogFile(table.Path, "import") + + ImportTable(table, permits, pool, config) + } +} diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index 3ad4beca..2debd471 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -15,10 +15,11 @@ import ( type Config struct { db.BaseConfig[int32] - Ts []int32 `help:"Optional space separated list of timeseries."` } func (config *Config) Execute() error { + permits := lard.NewPermitTables() + pool, err := pgxpool.New(context.Background(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) @@ -28,11 +29,11 @@ func (config *Config) Execute() error { kvalobs, histkvalobs := db.InitDBs() if config.ChosenDB(kvalobs.Name) { - // dumpDB(kvalobs, dataTable, textTable, config) + ImportDB(kvalobs, permits, pool, config) } if config.ChosenDB(histkvalobs.Name) { - // dumpDB(histkvalobs, dataTable, textTable, config) + ImportDB(histkvalobs, permits, pool, config) } return nil diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go index e3d2e3a1..2a039aec 100644 --- a/migrations/kvalobs/import/text.go +++ b/migrations/kvalobs/import/text.go @@ -1,60 +1,54 @@ package port import ( + "bufio" "log/slog" "migrate/kvalobs/db" "migrate/lard" - "migrate/utils" "os" + "path/filepath" - "github.com/jackc/pgx/v5/pgxpool" + "github.com/gocarina/gocsv" ) -func (config *Config) ImportText(pool *pgxpool.Pool, path string) error { - dir, err := os.ReadDir(path) +// Returns a TextTable for import +func TextTable(path 
string) db.TextTable { + return db.TextTable{ + Path: filepath.Join(path, db.TEXT_TABLE_NAME), + Import: lard.InsertTextData, + ReadCSV: ReadTextCSV, + } +} + +func ReadTextCSV(tsid int32, filename string) ([][]any, [][]any, error) { + file, err := os.Open(filename) if err != nil { slog.Error(err.Error()) - return err + return nil, nil, err } + defer file.Close() - var totalRowsInserted int64 - for _, file := range dir { - label, err := db.LabelFromFilename(file.Name()) - if err != nil { - slog.Error(err.Error()) - continue - } - - if !config.ShouldProcessLabel(label) { - continue - } - - // FIXME: FromTime can be nil and anyway config.FromTime is wrong here! - lardLabel := lard.Label(*label) - tsid, err := lard.GetTimeseriesID(&lardLabel, config.FromTime.Inner(), pool) - if err != nil { - slog.Error(err.Error()) - continue - } + reader := bufio.NewScanner(file) - if !utils.IsEmptyOrContains(config.Ts, tsid) { - continue - } + // TODO: maybe I should preallocate slice size if I can? + var data [][]any + for reader.Scan() { + var kvObs db.TextObs - data, err := db.ReadTextCSV(tsid, file.Name()) + err = gocsv.UnmarshalString(reader.Text(), &kvObs) if err != nil { - slog.Error(err.Error()) - continue + return nil, nil, err } - count, err := lard.InsertTextData(data, pool, "") - if err != nil { - slog.Error("Failed bulk insertion: " + err.Error()) - continue + lardObs := lard.TextObs{ + Id: tsid, + Obstime: kvObs.Obstime, + Text: &kvObs.Original, } - totalRowsInserted += count + data = append(data, lardObs.ToRow()) } - return nil + // Text obs are not flagged + return data, nil, nil } diff --git a/migrations/kvalobs/kvalobs_test.go b/migrations/kvalobs/kvalobs_test.go deleted file mode 100644 index 20fa279e..00000000 --- a/migrations/kvalobs/kvalobs_test.go +++ /dev/null @@ -1 +0,0 @@ -package kvalobs diff --git a/migrations/kvalobs_test.go b/migrations/kvalobs_test.go new file mode 100644 index 00000000..ee0c01ee --- /dev/null +++ b/migrations/kvalobs_test.go @@ -0,0 +1,119 @@ +package main + +import ( + "context" + "log" + "path/filepath" + "testing" + + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/kvalobs/db" + port "migrate/kvalobs/import" + "migrate/lard" +) + +const LARD_STRING string = "host=localhost user=postgres dbname=postgres password=postgres" +const DUMPS_PATH string = "./files" + +type KvalobsTestCase struct { + db db.DB + station int32 + paramid int32 + typeid int32 + sensor *int32 + level *int32 + permit int32 + expectedRows int64 +} + +func (t *KvalobsTestCase) mockConfig() (*port.Config, *lard.PermitMaps) { + return &port.Config{ + BaseConfig: db.BaseConfig[int32]{ + Stations: []int32{t.station}, + }, + }, &lard.PermitMaps{ + StationPermits: lard.StationPermitMap{ + t.station: t.permit, + }, + } +} + +type KvalobsDataCase struct { + KvalobsTestCase + table db.DataTable +} + +func DataCase(ktc KvalobsTestCase) KvalobsDataCase { + path := filepath.Join(DUMPS_PATH, ktc.db.Name) + return KvalobsDataCase{ktc, port.DataTable(path)} +} + +func TestImportDataKvalobs(t *testing.T) { + log.SetFlags(log.LstdFlags | log.Lshortfile) + + pool, err := pgxpool.New(context.TODO(), LARD_STRING) + if err != nil { + t.Log("Could not connect to Lard:", err) + } + defer pool.Close() + + _, histkvalobs := db.InitDBs() + + cases := []KvalobsDataCase{ + DataCase(KvalobsTestCase{db: histkvalobs, station: 18700, permit: 1, expectedRows: 100}), + // DataCase(KvalobsTestCase{db: histkvalobs, station: 18700, permit: 0, expectedRows: 100}), + } + + for _, c := range cases { + config, permits := 
c.mockConfig() + insertedRows, err := port.ImportTable(c.table, permits, pool, config) + + switch { + case err != nil: + t.Fatal(err) + case insertedRows != c.expectedRows: + t.Log(insertedRows) + // t.Fail() + } + } +} + +// type KvalobsTextCase struct { +// KvalobsTestCase +// table db.TextTable +// } +// +// func TextCase(ktc KvalobsTestCase) KvalobsTextCase { +// path := filepath.Join(DUMPS_PATH, ktc.db.Name) +// return KvalobsTextCase{ktc, port.TextTable(path)} +// } +// +// func TestImportTextKvalobs(t *testing.T) { +// log.SetFlags(log.LstdFlags | log.Lshortfile) +// +// pool, err := pgxpool.New(context.TODO(), LARD_STRING) +// if err != nil { +// t.Log("Could not connect to Lard:", err) +// } +// defer pool.Close() +// +// kvalobs, histkvalobs := db.InitDBs() +// +// cases := []KvalobsTextCase{ +// TextCase(KvalobsTestCase{db: kvalobs, station: 18700, paramid: 212, permit: 0, expectedRows: 100}), +// TextCase(KvalobsTestCase{db: histkvalobs, station: 18700, paramid: 212, permit: 0, expectedRows: 100}), +// } +// +// for _, c := range cases { +// config, permits := c.mockConfig() +// insertedRows, err := port.ImportTable(c.table, permits, pool, config) +// +// switch { +// case err != nil: +// t.Fatal(err) +// case insertedRows != c.expectedRows: +// t.Fail() +// } +// } +// } diff --git a/migrations/lard/main.go b/migrations/lard/main.go index c9477af9..fe5fe558 100644 --- a/migrations/lard/main.go +++ b/migrations/lard/main.go @@ -45,7 +45,7 @@ type Flag struct { // Flag encoding quality control status Useinfo *string // Number of tests that failed? - Cfailed *int32 + Cfailed *string } func (o *Flag) ToRow() []any { diff --git a/migrations/lard/permissions.go b/migrations/lard/permissions.go new file mode 100644 index 00000000..181a20f1 --- /dev/null +++ b/migrations/lard/permissions.go @@ -0,0 +1,129 @@ +package lard + +import ( + "context" + "log/slog" + "os" + "time" + + "github.com/jackc/pgx/v5" +) + +const STINFO_ENV_VAR string = "STINFO_STRING" + +type StationId = int32 +type PermitId = int32 + +type ParamPermitMap map[StationId][]ParamPermit +type StationPermitMap map[StationId]PermitId + +type ParamPermit struct { + TypeId int32 + ParamdId int32 + PermitId int32 +} + +type PermitMaps struct { + ParamPermits ParamPermitMap + StationPermits StationPermitMap +} + +func NewPermitTables() *PermitMaps { + slog.Info("Connecting to Stinfosys to cache metadata") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + conn, err := pgx.Connect(ctx, os.Getenv(STINFO_ENV_VAR)) + if err != nil { + slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) + os.Exit(1) + } + + return &PermitMaps{ + ParamPermits: cacheParamPermits(conn), + StationPermits: cacheStationPermits(conn), + } +} + +func cacheParamPermits(conn *pgx.Conn) ParamPermitMap { + cache := make(ParamPermitMap) + + rows, err := conn.Query( + context.TODO(), + "SELECT stationid, message_formatid, paramid, permitid FROM v_station_param_policy", + ) + if err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + for rows.Next() { + var stnr StationId + var permit ParamPermit + + if err := rows.Scan(&stnr, &permit.TypeId, &permit.ParamdId, &permit.PermitId); err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + cache[stnr] = append(cache[stnr], permit) + } + + if rows.Err() != nil { + slog.Error(rows.Err().Error()) + os.Exit(1) + } + + return cache +} + +func cacheStationPermits(conn *pgx.Conn) StationPermitMap { + cache := make(StationPermitMap) + + rows, err := conn.Query( + context.TODO(), + "SELECT stationid, permitid FROM station_policy", + ) + if err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + for rows.Next() { + var stnr StationId + var permit PermitId + + if err := rows.Scan(&stnr, &permit); err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + cache[stnr] = permit + } + + if rows.Err() != nil { + slog.Error(rows.Err().Error()) + os.Exit(1) + } + + return cache +} + +func (c *PermitMaps) TimeseriesIsOpen(stnr, typeid, paramid int32) bool { + // First check param permit table + if permits, ok := c.ParamPermits[stnr]; ok { + for _, permit := range permits { + if (permit.TypeId == 0 || permit.TypeId == typeid) && + (permit.ParamdId == 0 || permit.ParamdId == paramid) { + return permit.PermitId == 1 + } + } + } + + // Otherwise check station permit table + if permit, ok := c.StationPermits[stnr]; ok { + return permit == 1 + } + + return false +} diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index a67f3828..73f24b2a 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -16,7 +16,7 @@ type Label struct { Level *int32 } -func GetTimeseriesID(label *Label, fromtime *time.Time, pool *pgxpool.Pool) (tsid int32, err error) { +func GetTimeseriesID(label *Label, fromtime time.Time, pool *pgxpool.Pool) (tsid int32, err error) { // Query LARD labels table err = pool.QueryRow( context.TODO(), From 8b385066f1b7dc812ec1e661feb0322623a91459 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 29 Nov 2024 12:31:49 +0100 Subject: [PATCH 23/67] Kvalobs import working commit --- migrations/kdvh/import/cache/main.go | 3 +- migrations/kdvh/kdvh_test.go | 80 - migrations/kvalobs/{ => db}/config_test.go | 30 +- migrations/kvalobs/db/csv.go | 35 - migrations/kvalobs/db/labels.go | 18 +- migrations/kvalobs/db/main.go | 2 +- migrations/kvalobs/dump/dump.go | 12 +- migrations/kvalobs/import/data.go | 26 +- migrations/kvalobs/import/import.go | 14 +- migrations/kvalobs/import/text.go | 25 +- migrations/lard/timeseries.go | 2 +- .../tests/files/T_MDATA_combined/12345/TA.csv | 2645 +++++++++++++++++ .../data/18700/18700_313_509_0_0.csv | 41 + .../text/18700/18700_1000_316__.csv | 184 ++ migrations/{ => tests}/kdvh_test.go | 0 migrations/{ => tests}/kvalobs_test.go | 83 +- 16 files changed, 2983 insertions(+), 217 deletions(-) delete mode 100644 migrations/kdvh/kdvh_test.go rename migrations/kvalobs/{ => db}/config_test.go (50%) delete mode 100644 migrations/kvalobs/db/csv.go create mode 100644 migrations/tests/files/T_MDATA_combined/12345/TA.csv create mode 100644 
migrations/tests/files/histkvalobs/data/18700/18700_313_509_0_0.csv create mode 100644 migrations/tests/files/histkvalobs/text/18700/18700_1000_316__.csv rename migrations/{ => tests}/kdvh_test.go (100%) rename migrations/{ => tests}/kvalobs_test.go (53%) diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index e69a9957..83b5714b 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -38,6 +38,7 @@ func CacheMetadata(tables, stations, elements []string, kdvh *db.KDVH) *Cache { } stinfoMeta := cacheStinfoMeta(tables, elements, kdvh, conn) + // TODO: use the one in migrate/lard instead! stationPermits := cacheStationPermits(conn) paramPermits := cacheParamPermits(conn) @@ -98,7 +99,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo Level: param.Hlevel, } - tsid, err := lard.GetTimeseriesID(&label, ¶m.Fromtime, pool) + tsid, err := lard.GetTimeseriesID(&label, param.Fromtime, pool) if err != nil { slog.Error(logstr + "could not obtain timeseries - " + err.Error()) return nil, err diff --git a/migrations/kdvh/kdvh_test.go b/migrations/kdvh/kdvh_test.go deleted file mode 100644 index fa1b9be9..00000000 --- a/migrations/kdvh/kdvh_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package kdvh - -import ( - "context" - "fmt" - "log" - "testing" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - - "migrate/kdvh/db" - port "migrate/kdvh/import" - "migrate/kdvh/import/cache" -) - -const LARD_STRING string = "host=localhost user=postgres dbname=postgres password=postgres" - -type ImportTest struct { - table string - station int32 - elem string - permit int32 - expectedRows int64 -} - -func (t *ImportTest) mockConfig() (*port.Config, *cache.Cache) { - return &port.Config{ - Tables: []string{t.table}, - Stations: []string{fmt.Sprint(t.station)}, - Elements: []string{t.elem}, - Path: "./tests", - HasHeader: true, - Sep: ";", - }, - &cache.Cache{ - Stinfo: cache.StinfoMap{ - {ElemCode: t.elem, TableName: t.table}: { - Fromtime: time.Date(2001, 7, 1, 9, 0, 0, 0, time.UTC), - IsScalar: true, - }, - }, - StationPermits: cache.StationPermitMap{ - t.station: t.permit, - }, - } -} - -func TestImportKDVH(t *testing.T) { - log.SetFlags(log.LstdFlags | log.Lshortfile) - - pool, err := pgxpool.New(context.TODO(), LARD_STRING) - if err != nil { - t.Log("Could not connect to Lard:", err) - } - defer pool.Close() - - testCases := []ImportTest{ - {table: "T_MDATA", station: 12345, elem: "TA", permit: 0, expectedRows: 0}, // restricted TS - {table: "T_MDATA", station: 12345, elem: "TA", permit: 1, expectedRows: 2644}, // open TS - } - - kdvh := db.Init() - - // TODO: test does not fail, if flags are not inserted - // TODO: bar does not work well with log print outs - for _, c := range testCases { - config, cache := c.mockConfig() - - table, ok := kdvh.Tables[c.table] - if !ok { - t.Fatal("Table does not exist in database") - } - - insertedRows := port.ImportTable(table, cache, pool, config) - if insertedRows != c.expectedRows { - t.Fail() - } - } -} diff --git a/migrations/kvalobs/config_test.go b/migrations/kvalobs/db/config_test.go similarity index 50% rename from migrations/kvalobs/config_test.go rename to migrations/kvalobs/db/config_test.go index 5fa65d34..a2ec2477 100644 --- a/migrations/kvalobs/config_test.go +++ b/migrations/kvalobs/db/config_test.go @@ -1,51 +1,49 @@ -package kvalobs +package db import ( "testing" - - "migrate/kvalobs/db" ) func TestShouldProcessLabel(t *testing.T) { type TestCase[T string] 
struct { tag string - label db.Label[T] - config db.BaseConfig[T] + label Label[T] + config BaseConfig[T] expected bool } cases := []TestCase[string]{ { tag: "empty config", - label: db.Label[string]{StationID: 18700}, - config: db.BaseConfig[string]{}, + label: Label[string]{StationID: 18700}, + config: BaseConfig[string]{}, expected: true, }, { tag: "station specified", - label: db.Label[string]{StationID: 18700}, - config: db.BaseConfig[string]{Stations: []int32{18700}}, + label: Label[string]{StationID: 18700}, + config: BaseConfig[string]{Stations: []int32{18700}}, expected: true, }, { tag: "station not in label", - label: db.Label[string]{StationID: 18700}, - config: db.BaseConfig[string]{Stations: []int32{20000}}, + label: Label[string]{StationID: 18700}, + config: BaseConfig[string]{Stations: []int32{20000}}, expected: false, }, { tag: "label without level", - label: db.Label[string]{}, - config: db.BaseConfig[string]{Levels: []int32{2}}, + label: Label[string]{}, + config: BaseConfig[string]{Levels: []int32{2}}, expected: false, }, { tag: "valid level", - label: func() db.Label[string] { + label: func() Label[string] { var level int32 = 2 - return db.Label[string]{Level: &level} + return Label[string]{Level: &level} }(), - config: db.BaseConfig[string]{Levels: []int32{2}}, + config: BaseConfig[string]{Levels: []int32{2}}, expected: true, }, } diff --git a/migrations/kvalobs/db/csv.go b/migrations/kvalobs/db/csv.go deleted file mode 100644 index 177ea100..00000000 --- a/migrations/kvalobs/db/csv.go +++ /dev/null @@ -1,35 +0,0 @@ -package db - -// type Rower interface { -// ToRow() []any -// } -// -// func ReadSeriesCSV[T Rower](tsid int32, filename string) ([][]any, error) { -// file, err := os.Open(filename) -// if err != nil { -// slog.Error(err.Error()) -// return nil, err -// } -// defer file.Close() -// -// reader := bufio.NewScanner(file) -// -// // TODO: maybe I should preallocate slice size if I can? 
-// 	var data [][]any
-// 	for reader.Scan() {
-// 		var obs T
-//
-// 		err = gocsv.UnmarshalString(reader.Text(), &obs)
-// 		if err != nil {
-// 			return nil, err
-// 		}
-//
-// 		// Kvalobs does not have IDs so we have to add it here
-// 		// obs.Id = tsid
-//
-// 		row := obs.ToRow()
-// 		data = append(data, row)
-// 	}
-//
-// 	return data, nil
-// }
diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go
index fff3818b..814dc7f2 100644
--- a/migrations/kvalobs/db/labels.go
+++ b/migrations/kvalobs/db/labels.go
@@ -11,8 +11,8 @@ import (
 // Kvalobs specific label
 type Label[T int32 | string] struct {
 	StationID int32
-	TypeID    int32
 	ParamID   int32
+	TypeID    int32
 	// These two are not present in the `text_data` table
 	Sensor *T // bpchar(1) in `data` table
 	Level  *int32
@@ -37,18 +37,14 @@ func (l *Label[T]) sensorLevelString() (string, string) {
 
 func (l *Label[T]) ToFilename() string {
 	sensor, level := l.sensorLevelString()
-	return fmt.Sprintf("%v_%v_%v_%v_%v.csv", l.StationID, l.TypeID, l.ParamID, sensor, level)
+	return fmt.Sprintf("%v_%v_%v_%v_%v.csv", l.StationID, l.ParamID, l.TypeID, sensor, level)
 }
 
-func (l *Label[T]) ToString() string {
+func (l *Label[T]) LogStr() string {
 	sensor, level := l.sensorLevelString()
 	return fmt.Sprintf(
-		"%v - %v - %v - %v - %v",
-		l.StationID,
-		l.ParamID,
-		l.TypeID,
-		sensor,
-		level,
+		"(%v - %v - %v - %v - %v): ",
+		l.StationID, l.ParamID, l.TypeID, sensor, level,
 	)
 }
 
@@ -85,8 +81,8 @@ func LabelFromFilename(filename string) (*LardLabel, error) {
 
 	return &LardLabel{
 		StationID: *converted[0],
-		TypeID:    *converted[1],
-		ParamID:   *converted[2],
+		ParamID:   *converted[1],
+		TypeID:    *converted[2],
 		Sensor:    converted[3],
 		Level:     converted[4],
 	}, nil
diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go
index bc9909c8..09282dc8 100644
--- a/migrations/kvalobs/db/main.go
+++ b/migrations/kvalobs/db/main.go
@@ -86,7 +86,7 @@ import (
 // instead there is a sequential ID associated with each observation row
 
 const DATA_TABLE_NAME string = "data"
-const TEXT_TABLE_NAME string = "text"
+const TEXT_TABLE_NAME string = "text" // text_data
 
 var NULL_VALUES []float64 = []float64{-34767, -34766}
 
diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go
index 184bdf31..c90d4e29 100644
--- a/migrations/kvalobs/dump/dump.go
+++ b/migrations/kvalobs/dump/dump.go
@@ -23,8 +23,7 @@ func readLabelCSV(filename string) (labels []*db.KvLabel, err error) {
 	}
 	defer file.Close()
 
-	// TODO: maybe I should preallocate slice size if I can?
-	err = gocsv.UnmarshalFile(file, &labels)
+	err = gocsv.Unmarshal(file, &labels)
 
 	return labels, err
 }
@@ -35,6 +34,8 @@
 	}
 
 	slog.Info("Writing timeseries labels to " + path)
+	// Write number of lines as header
+	// file.Write([]byte(fmt.Sprintf("%v\n", len(labels))))
 	if err = gocsv.Marshal(labels, file); err != nil {
 		return err
 	}
@@ -42,7 +43,6 @@
 	return nil
 }
 
-// TODO: add number of rows as header row
 func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, label *db.KvLabel) error {
 	filename := filepath.Join(path, label.ToFilename())
 	file, err := os.Create(filename)
@@ -50,7 +50,9 @@ func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, labe
 		return err
 	}
 
-	if err = gocsv.MarshalFile(series, file); err != nil {
+	// Write number of lines on first line, keep headers on 2nd line
+	file.Write([]byte(fmt.Sprintf("%v\n", len(series))))
+	if err = gocsv.Marshal(series, file); err != nil {
 		slog.Error(err.Error())
 		return err
 	}
@@ -131,7 +133,7 @@
 				return
 			}
 
-			slog.Info(label.ToString() + ": dumped successfully")
+			slog.Info(label.LogStr() + "dumped successfully")
 		}()
 	}
 	wg.Wait()
diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go
index f9353410..1f603fc9 100644
--- a/migrations/kvalobs/import/data.go
+++ b/migrations/kvalobs/import/data.go
@@ -10,8 +10,6 @@ import (
 	"strconv"
 	"strings"
 	"time"
-
-	"github.com/gocarina/gocsv"
 )
 
 func readDataFiles() []lard.Label {
@@ -38,24 +36,28 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) {
 
 	reader := bufio.NewScanner(file)
 
-	// TODO: maybe I should preallocate slice size if I can?
-	// Parse header
-	// reader.Scan()
-	// rowCount, _ = strconv.Atoi(scanner.Text())
-	// data := make([][]any, 0, rowCount)
-	// flags := make([][]any, 0, rowCount)
-	var data [][]any
-	var flags [][]any
+	// Parse number of rows
+	reader.Scan()
+	rowCount, _ := strconv.Atoi(reader.Text())
+
+	// Skip header
+	reader.Scan()
+
+	// Parse observations
+	data := make([][]any, 0, rowCount)
+	flags := make([][]any, 0, rowCount)
 	for reader.Scan() {
-		// obstime, original, tbtime, corrected, Controlinfo, Useinfo, Cfailed
+		// obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed
+		// We don't parse tbtime
 		fields := strings.Split(reader.Text(), ",")
 
-		obstime, err := time.Parse(time.RFC3339Nano, fields[0])
+		obstime, err := time.Parse(time.RFC3339, fields[0])
 		if err != nil {
 			return nil, nil, err
 		}
 
+		// TODO: probably should insert corrected to data table
+		// and keep original in flags table?
 		obsvalue64, err := strconv.ParseFloat(fields[1], 32)
 		if err != nil {
 			return nil, nil, err
 		}
diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go
index 838510f9..ff80582c 100644
--- a/migrations/kvalobs/import/import.go
+++ b/migrations/kvalobs/import/import.go
@@ -34,6 +34,8 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la
 
 		var wg sync.WaitGroup
 
+		var stationRows int64
+
 		bar := utils.NewBar(len(labels), station.Name())
 		for _, file := range labels {
 			bar.Add(1)
@@ -48,12 +50,12 @@
 				continue
 			}
 
-			labelStr := label.ToString()
+			labelStr := label.LogStr()
 
 			// Check if data for this station/element is restricted
 			if !permits.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) {
 				// TODO: eventually use this to choose which table to use on insert
-				slog.Warn(labelStr + "Timeseries data is restricted")
+				slog.Warn(labelStr + "timeseries data is restricted, skipping")
 				continue
 			}
 
@@ -61,9 +63,9 @@
 			go func() {
 				defer wg.Done()
 
-				// FIXME: FromTime can be nil and anyway config.FromTime is wrong here!
 				lardLabel := lard.Label(*label)
-				// tsid, err := lard.GetTimeseriesID(&lardLabel, config.FromTime.Inner(), pool)
+				// TODO: figure out if we should map the (0, 0) sensor level pair to (NULL, NULL)
+				// TODO: figure out where to get fromtime, kvalobs directly? Stinfosys?
 				tsid, err := lard.GetTimeseriesID(&lardLabel, time.Now(), pool)
 				if err != nil {
 					slog.Error(err.Error())
@@ -86,10 +88,12 @@
 					slog.Error(labelStr + "failed flag bulk insertion - " + err.Error())
 				}
 
-				rowsInserted += count
+				stationRows += count
 			}()
 		}
 		wg.Wait()
 
+		rowsInserted += stationRows
+		slog.Info(fmt.Sprintf("Station %v: %v rows inserted", station.Name(), stationRows))
 	}
 
 	outputStr := fmt.Sprintf("%v: %v total rows inserted", table.Path, rowsInserted)
diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go
index 2a039aec..941e0e47 100644
--- a/migrations/kvalobs/import/text.go
+++ b/migrations/kvalobs/import/text.go
@@ -7,8 +7,9 @@ import (
 	"migrate/lard"
 	"os"
 	"path/filepath"
-
-	"github.com/gocarina/gocsv"
+	"strconv"
+	"strings"
+	"time"
 )
 
 // Returns a TextTable for import
@@ -30,20 +31,28 @@ func ReadTextCSV(tsid int32, filename string) ([][]any, [][]any, error) {
 
 	reader := bufio.NewScanner(file)
 
-	// TODO: maybe I should preallocate slice size if I can?
- var data [][]any + // Parse number of rows + reader.Scan() + rowCount, _ := strconv.Atoi(reader.Text()) + + // Skip header + reader.Scan() + + // Parse observations + data := make([][]any, 0, rowCount) for reader.Scan() { - var kvObs db.TextObs + // obstime, original, tbtime + fields := strings.Split(reader.Text(), ",") - err = gocsv.UnmarshalString(reader.Text(), &kvObs) + obstime, err := time.Parse(time.RFC3339, fields[0]) if err != nil { return nil, nil, err } lardObs := lard.TextObs{ Id: tsid, - Obstime: kvObs.Obstime, - Text: &kvObs.Original, + Obstime: obstime, + Text: &fields[1], } data = append(data, lardObs.ToRow()) diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index 73f24b2a..490d3c02 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -10,8 +10,8 @@ import ( // Struct that mimics `labels.met` table structure type Label struct { StationID int32 - TypeID int32 ParamID int32 + TypeID int32 Sensor *int32 Level *int32 } diff --git a/migrations/tests/files/T_MDATA_combined/12345/TA.csv b/migrations/tests/files/T_MDATA_combined/12345/TA.csv new file mode 100644 index 00000000..dd6cb263 --- /dev/null +++ b/migrations/tests/files/T_MDATA_combined/12345/TA.csv @@ -0,0 +1,2645 @@ +2644 +2001-07-01_09:00:00;12.9;70000 +2001-07-01_10:00:00;13;70000 +2001-07-01_11:00:00;13;70000 +2001-07-01_12:00:00;13.1;70000 +2001-07-01_13:00:00;13.1;70000 +2001-07-01_14:00:00;13;70000 +2001-07-01_15:00:00;12.9;70000 +2001-07-01_16:00:00;12.8;70000 +2001-07-01_17:00:00;12.8;70000 +2001-07-01_18:00:00;12.7;70000 +2001-07-01_19:00:00;12.8;70000 +2001-07-01_20:00:00;12.6;70000 +2001-07-01_21:00:00;12.6;70000 +2001-07-01_22:00:00;12.6;70000 +2001-07-01_23:00:00;12.6;70000 +2001-07-02_00:00:00;12.5;70000 +2001-07-02_01:00:00;12.4;70000 +2001-07-02_02:00:00;12.4;70000 +2001-07-02_03:00:00;12.3;70000 +2001-07-02_04:00:00;12.3;70000 +2001-07-02_05:00:00;12.3;70000 +2001-07-02_06:00:00;12.4;70000 +2001-07-02_07:00:00;12.5;70000 +2001-07-02_08:00:00;12.6;70000 +2001-07-02_09:00:00;12.7;70000 +2001-07-02_10:00:00;12.9;70000 +2001-07-02_11:00:00;13;70000 +2001-07-02_12:00:00;13.2;70000 +2001-07-02_13:00:00;13.3;70000 +2001-07-02_14:00:00;13.3;70000 +2001-07-02_15:00:00;13.4;70000 +2001-07-02_16:00:00;13.3;70000 +2001-07-02_17:00:00;13.3;70000 +2001-07-02_18:00:00;13.2;70000 +2001-07-02_19:00:00;13.2;70000 +2001-07-02_20:00:00;13.1;70000 +2001-07-02_21:00:00;12.9;70000 +2001-07-02_22:00:00;12.9;70000 +2001-07-02_23:00:00;12.9;70000 +2001-07-03_00:00:00;12.8;58927 +2001-07-03_01:00:00;12.7;70000 +2001-07-03_02:00:00;12.6;70000 +2001-07-03_03:00:00;12.7;70000 +2001-07-03_04:00:00;12.5;70000 +2001-07-03_05:00:00;12.2;70000 +2001-07-03_06:00:00;12.3;70000 +2001-07-03_07:00:00;12.4;70000 +2001-07-03_08:00:00;12.5;70000 +2001-07-03_09:00:00;12.6;70000 +2001-07-03_10:00:00;12.6;70000 +2001-07-03_11:00:00;12.8;70000 +2001-07-03_12:00:00;12.8;70000 +2001-07-03_13:00:00;13;70000 +2001-07-03_14:00:00;13.1;70000 +2001-07-03_15:00:00;13.2;70000 +2001-07-03_16:00:00;13.2;70000 +2001-07-03_17:00:00;13.1;70000 +2001-07-03_18:00:00;13.1;70000 +2001-07-03_19:00:00;13.1;70000 +2001-07-03_20:00:00;12.8;70000 +2001-07-03_21:00:00;12.8;70000 +2001-07-03_22:00:00;12.8;70000 +2001-07-03_23:00:00;12.8;70000 +2001-07-04_00:00:00;12.6;70000 +2001-07-04_01:00:00;12.8;70000 +2001-07-04_02:00:00;12.3;70000 +2001-07-04_03:00:00;12.6;70000 +2001-07-04_04:00:00;12.5;70000 +2001-07-04_05:00:00;12.5;70000 +2001-07-04_06:00:00;12.5;70000 +2001-07-04_07:00:00;12.5;70000 
+2001-07-04_08:00:00;12.4;70000 +2001-07-04_09:00:00;12.5;70000 +2001-07-04_10:00:00;12.6;70000 +2001-07-04_11:00:00;12.6;70000 +2001-07-04_12:00:00;12.6;58927 +2001-07-04_13:00:00;12.5;70000 +2001-07-04_14:00:00;12.6;70000 +2001-07-04_15:00:00;12.5;70000 +2001-07-04_16:00:00;12.6;70000 +2001-07-04_17:00:00;12.6;70000 +2001-07-04_18:00:00;12.6;70000 +2001-07-04_19:00:00;12.5;70000 +2001-07-04_20:00:00;12.5;70000 +2001-07-04_21:00:00;12.5;70000 +2001-07-04_22:00:00;12.4;70000 +2001-07-04_23:00:00;12.4;70000 +2001-07-05_00:00:00;12.5;70000 +2001-07-05_01:00:00;12.4;70000 +2001-07-05_02:00:00;12.1;70000 +2001-07-05_03:00:00;11.9;70000 +2001-07-05_04:00:00;12;70000 +2001-07-05_05:00:00;12;70000 +2001-07-05_06:00:00;12.1;70000 +2001-07-05_07:00:00;12.3;70000 +2001-07-05_08:00:00;12.6;70000 +2001-07-05_09:00:00;12.9;70000 +2001-07-05_10:00:00;13;70000 +2001-07-05_11:00:00;13.2;70000 +2001-07-05_12:00:00;13.5;70000 +2001-07-05_13:00:00;13.8;70000 +2001-07-05_14:00:00;13.9;70000 +2001-07-05_15:00:00;13.4;70000 +2001-07-05_16:00:00;13.9;70000 +2001-07-05_17:00:00;13.8;70000 +2001-07-05_18:00:00;13.7;70000 +2001-07-05_19:00:00;13.6;70000 +2001-07-05_20:00:00;13.5;70000 +2001-07-05_21:00:00;13.3;70000 +2001-07-05_22:00:00;13.2;70000 +2001-07-05_23:00:00;13.1;70000 +2001-07-06_00:00:00;13.1;70000 +2001-07-06_01:00:00;13;70000 +2001-07-06_02:00:00;12.9;70000 +2001-07-06_03:00:00;12.8;70000 +2001-07-06_04:00:00;12.9;58927 +2001-07-06_05:00:00;12.9;70000 +2001-07-06_06:00:00;13.2;70000 +2001-07-06_07:00:00;13.2;70000 +2001-07-06_08:00:00;13.3;70000 +2001-07-06_09:00:00;13.8;70000 +2001-07-06_10:00:00;14.3;70000 +2001-07-06_11:00:00;14.7;70000 +2001-07-06_12:00:00;15.8;70000 +2001-07-06_13:00:00;14.9;70000 +2001-07-06_14:00:00;14.6;70000 +2001-07-06_15:00:00;14.7;70000 +2001-07-06_16:00:00;14.6;70000 +2001-07-06_17:00:00;15.5;70000 +2001-07-06_18:00:00;16.6;70000 +2001-07-06_19:00:00;15.5;70000 +2001-07-06_20:00:00;14.8;70000 +2001-07-06_21:00:00;14.7;70000 +2001-07-06_22:00:00;16.2;70000 +2001-07-06_23:00:00;15.6;70000 +2001-07-07_00:00:00;15.1;70000 +2001-07-07_01:00:00;14.4;70000 +2001-07-07_02:00:00;13.8;70000 +2001-07-07_03:00:00;13.2;70000 +2001-07-07_04:00:00;13.3;70000 +2001-07-07_05:00:00;13.6;70000 +2001-07-07_06:00:00;14;70000 +2001-07-07_07:00:00;14.1;70000 +2001-07-07_08:00:00;14.1;70000 +2001-07-07_09:00:00;14.3;70000 +2001-07-07_10:00:00;14.4;70000 +2001-07-07_11:00:00;14.5;70000 +2001-07-07_12:00:00;14.6;70000 +2001-07-07_13:00:00;14.9;70000 +2001-07-07_14:00:00;15;70000 +2001-07-07_15:00:00;14.9;70000 +2001-07-07_16:00:00;15;70000 +2001-07-07_17:00:00;14.9;70000 +2001-07-07_18:00:00;14.9;70000 +2001-07-07_19:00:00;14.8;70000 +2001-07-07_20:00:00;14.8;70000 +2001-07-07_21:00:00;15;70000 +2001-07-07_22:00:00;15;70000 +2001-07-07_23:00:00;15.3;70000 +2001-07-08_00:00:00;14.9;70000 +2001-07-08_01:00:00;14.6;70000 +2001-07-08_02:00:00;14.5;70000 +2001-07-08_03:00:00;14.4;70000 +2001-07-08_04:00:00;14.4;70000 +2001-07-08_05:00:00;14.7;70000 +2001-07-08_06:00:00;14.6;70000 +2001-07-08_07:00:00;14.3;70000 +2001-07-08_08:00:00;14.5;70000 +2001-07-08_09:00:00;14.5;70000 +2001-07-08_10:00:00;14.5;70000 +2001-07-08_11:00:00;15.1;70000 +2001-07-08_12:00:00;15.2;70000 +2001-07-08_13:00:00;15.5;70000 +2001-07-08_14:00:00;14.6;70000 +2001-07-08_15:00:00;16.9;78947 +2001-07-08_16:00:00;17.1;78947 +2001-07-08_17:00:00;16.9;78947 +2001-07-08_18:00:00;16;78947 +2001-07-08_19:00:00;15.5;78947 +2001-07-08_20:00:00;15.1;78947 +2001-07-08_21:00:00;14.9;78947 +2001-07-08_22:00:00;14.6;78947 
+2001-07-08_23:00:00;14.3;78947 +2001-07-09_00:00:00;14.1;78947 +2001-07-09_01:00:00;14.2;78947 +2001-07-09_02:00:00;14.3;78947 +2001-07-09_03:00:00;14;78947 +2001-07-09_04:00:00;14;78947 +2001-07-09_05:00:00;14.2;78947 +2001-07-09_06:00:00;14;78947 +2001-07-09_07:00:00;14.6;78947 +2001-07-09_08:00:00;14.5;78947 +2001-07-09_09:00:00;15.3;78947 +2001-07-09_10:00:00;16.3;78947 +2001-07-09_11:00:00;15.1;78947 +2001-07-09_12:00:00;16.2;78947 +2001-07-09_13:00:00;15.2;78947 +2001-07-09_14:00:00;15.6;78947 +2001-07-09_15:00:00;15.4;78947 +2001-07-09_16:00:00;15.6;78947 +2001-07-09_17:00:00;15;78947 +2001-07-09_18:00:00;14.2;78947 +2001-07-09_19:00:00;13.7;78947 +2001-07-09_20:00:00;13.5;78947 +2001-07-09_21:00:00;13.2;78947 +2001-07-09_22:00:00;13.4;78947 +2001-07-09_23:00:00;13.5;78947 +2001-07-10_00:00:00;12.8;78947 +2001-07-10_01:00:00;12.9;78947 +2001-07-10_02:00:00;12.9;78947 +2001-07-10_03:00:00;13.2;78947 +2001-07-10_04:00:00;13.1;78947 +2001-07-10_05:00:00;13.3;78947 +2001-07-10_06:00:00;13.8;78947 +2001-07-10_07:00:00;13.9;78947 +2001-07-10_08:00:00;14.3;78947 +2001-07-10_09:00:00;14.7;78947 +2001-07-10_10:00:00;15.1;78947 +2001-07-10_11:00:00;15.3;78947 +2001-07-10_12:00:00;15.3;78947 +2001-07-10_13:00:00;16;78947 +2001-07-10_14:00:00;16.1;78947 +2001-07-10_15:00:00;15.6;78947 +2001-07-10_16:00:00;15;78947 +2001-07-10_17:00:00;14.5;78947 +2001-07-10_18:00:00;14.3;78947 +2001-07-10_19:00:00;13.5;78947 +2001-07-10_20:00:00;13.3;78947 +2001-07-10_21:00:00;12.9;78947 +2001-07-10_22:00:00;12.2;78947 +2001-07-10_23:00:00;11.9;78947 +2001-07-11_00:00:00;13;78947 +2001-07-11_01:00:00;12.7;78947 +2001-07-11_02:00:00;12.7;78947 +2001-07-11_03:00:00;12.6;78947 +2001-07-11_04:00:00;12.7;78947 +2001-07-11_05:00:00;12.8;78947 +2001-07-11_06:00:00;13.7;78947 +2001-07-11_07:00:00;13.7;78947 +2001-07-11_08:00:00;13.7;78947 +2001-07-11_09:00:00;14.4;78947 +2001-07-11_10:00:00;14.7;78947 +2001-07-11_11:00:00;15.2;78947 +2001-07-11_12:00:00;15.3;78947 +2001-07-11_13:00:00;13.7;78947 +2001-07-11_14:00:00;14.5;78947 +2001-07-11_15:00:00;15;78947 +2001-07-11_16:00:00;13.2;78947 +2001-07-11_17:00:00;12.9;78947 +2001-07-11_18:00:00;12.5;78947 +2001-07-11_19:00:00;12.3;78947 +2001-07-11_20:00:00;12.4;78947 +2001-07-11_21:00:00;12.4;78947 +2001-07-11_22:00:00;12.4;78947 +2001-07-11_23:00:00;12.5;78947 +2001-07-12_00:00:00;12;78947 +2001-07-12_01:00:00;12.1;78947 +2001-07-12_02:00:00;12.2;78947 +2001-07-12_03:00:00;12.2;78947 +2001-07-12_04:00:00;12.3;78947 +2001-07-12_05:00:00;12.3;78947 +2001-07-12_06:00:00;12.1;78947 +2001-07-12_07:00:00;12.4;78947 +2001-07-12_08:00:00;13.5;78947 +2001-07-12_09:00:00;13.1;78947 +2001-07-12_10:00:00;14;78947 +2001-07-12_11:00:00;15.2;78947 +2001-07-12_12:00:00;14.3;78947 +2001-07-12_13:00:00;13.9;78947 +2001-07-12_14:00:00;14.3;78947 +2001-07-12_15:00:00;14;78947 +2001-07-12_16:00:00;13.9;78947 +2001-07-12_17:00:00;13.7;78947 +2001-07-12_18:00:00;13.5;78947 +2001-07-12_19:00:00;13.1;78947 +2001-07-12_20:00:00;12.6;78947 +2001-07-12_21:00:00;12.2;78947 +2001-07-12_22:00:00;11.9;78947 +2001-07-12_23:00:00;11.9;78947 +2001-07-13_00:00:00;11.7;78947 +2001-07-13_01:00:00;11.5;78947 +2001-07-13_02:00:00;11.3;78947 +2001-07-13_03:00:00;11.1;78947 +2001-07-13_04:00:00;11.3;78947 +2001-07-13_05:00:00;12;78947 +2001-07-13_06:00:00;13.4;78947 +2001-07-13_08:00:00;15.5;78947 +2001-07-13_09:00:00;16.5;78947 +2001-07-13_10:00:00;17.4;78947 +2001-07-13_11:00:00;17.7;78947 +2001-07-13_12:00:00;17.3;78947 +2001-07-13_13:00:00;17.3;78947 +2001-07-13_14:00:00;17.3;78947 
+2001-07-13_15:00:00;17;78947 +2001-07-13_16:00:00;16.4;78947 +2001-07-13_17:00:00;15.5;78947 +2001-07-13_18:00:00;14.9;78947 +2001-07-13_19:00:00;14.1;78947 +2001-07-13_20:00:00;13.2;78947 +2001-07-13_21:00:00;12.3;78947 +2001-07-13_23:00:00;11.5;78947 +2001-07-14_00:00:00;11.2;78947 +2001-07-14_01:00:00;10.9;78947 +2001-07-14_02:00:00;10.7;78947 +2001-07-14_03:00:00;10.6;78947 +2001-07-14_04:00:00;10.7;78947 +2001-07-14_05:00:00;11.6;78947 +2001-07-14_06:00:00;13.1;78947 +2001-07-14_07:00:00;14.5;78947 +2001-07-14_08:00:00;15.9;78947 +2001-07-14_09:00:00;17.2;78947 +2001-07-14_10:00:00;18.3;78947 +2001-07-14_11:00:00;18.8;78947 +2001-07-14_12:00:00;18.5;78947 +2001-07-14_13:00:00;17.9;78947 +2001-07-14_14:00:00;17.4;78947 +2001-07-14_15:00:00;17.1;78947 +2001-07-14_16:00:00;17;78947 +2001-07-14_17:00:00;16.6;78947 +2001-07-14_18:00:00;16.4;78947 +2001-07-14_19:00:00;15.4;78947 +2001-07-14_20:00:00;14.6;78947 +2001-07-14_21:00:00;13.7;78947 +2001-07-14_23:00:00;12.9;78947 +2001-07-15_00:00:00;12.7;78947 +2001-07-15_01:00:00;12.5;78947 +2001-07-15_02:00:00;12.5;78947 +2001-07-15_03:00:00;12.4;78947 +2001-07-15_05:00:00;12.8;78947 +2001-07-15_06:00:00;13.6;78947 +2001-07-15_10:00:00;16.7;78947 +2001-07-15_11:00:00;16.6;78947 +2001-07-15_13:00:00;16.1;78947 +2001-07-15_15:00:00;15.9;78947 +2001-07-15_16:00:00;15.5;78947 +2001-07-15_17:00:00;15.1;78947 +2001-07-15_18:00:00;14.7;78947 +2001-07-15_19:00:00;14.2;78947 +2001-07-15_20:00:00;13.5;78947 +2001-07-15_21:00:00;12.4;78947 +2001-07-15_22:00:00;11.5;78947 +2001-07-15_23:00:00;10.9;78947 +2001-07-16_00:00:00;10.1;78947 +2001-07-16_01:00:00;10.6;78947 +2001-07-16_02:00:00;11.8;78947 +2001-07-16_03:00:00;12.7;78947 +2001-07-16_04:00:00;13.2;78947 +2001-07-16_05:00:00;13.6;78947 +2001-07-16_06:00:00;14;78947 +2001-07-16_07:00:00;15.2;78947 +2001-07-16_08:00:00;16.6;78947 +2001-07-16_09:00:00;17.7;78947 +2001-07-16_10:00:00;18.8;78947 +2001-07-16_11:00:00;19.5;78947 +2001-07-16_12:00:00;20.5;78947 +2001-07-16_13:00:00;20.6;78947 +2001-07-16_14:00:00;20.7;78947 +2001-07-16_15:00:00;19.3;78947 +2001-07-16_16:00:00;19.4;78947 +2001-07-16_17:00:00;18.5;78947 +2001-07-16_18:00:00;17.1;78947 +2001-07-16_19:00:00;15.8;78947 +2001-07-16_20:00:00;15.1;78947 +2001-07-16_21:00:00;15.1;78947 +2001-07-16_22:00:00;15.5;78947 +2001-07-16_23:00:00;15.1;78947 +2001-07-17_00:00:00;15.3;78947 +2001-07-17_01:00:00;15.1;78947 +2001-07-17_02:00:00;15.3;78947 +2001-07-17_03:00:00;15.3;78947 +2001-07-17_04:00:00;15.2;78947 +2001-07-17_05:00:00;15.1;78947 +2001-07-17_06:00:00;14.8;78947 +2001-07-17_07:00:00;14.8;78947 +2001-07-17_08:00:00;14.8;78947 +2001-07-17_09:00:00;15;78947 +2001-07-17_10:00:00;15.2;78947 +2001-07-17_11:00:00;15.2;78947 +2001-07-17_12:00:00;15.3;78947 +2001-07-17_13:00:00;15.4;78947 +2001-07-17_14:00:00;15.5;78947 +2001-07-17_15:00:00;15.5;78947 +2001-07-17_16:00:00;16.4;78947 +2001-07-17_17:00:00;16.2;78947 +2001-07-17_18:00:00;14.6;78947 +2001-07-17_19:00:00;14.7;78947 +2001-07-17_20:00:00;14.8;78947 +2001-07-17_21:00:00;14.1;78947 +2001-07-17_22:00:00;13.7;78947 +2001-07-17_23:00:00;13.7;78947 +2001-07-18_00:00:00;13.4;78947 +2001-07-18_01:00:00;14;78947 +2001-07-18_02:00:00;14.3;78947 +2001-07-18_03:00:00;13.9;78947 +2001-07-18_04:00:00;13.4;78947 +2001-07-18_05:00:00;13.7;78947 +2001-07-18_06:00:00;16.2;78947 +2001-07-18_07:00:00;17.4;78947 +2001-07-18_08:00:00;18.3;78947 +2001-07-18_11:00:00;20.1;78947 +2001-07-18_12:00:00;19.7;78947 +2001-07-18_13:00:00;18.6;78947 +2001-07-18_14:00:00;19.3;78947 +2001-07-18_15:00:00;18.3;78947 
+2001-07-18_16:00:00;16.6;78947 +2001-07-18_17:00:00;17;78947 +2001-07-18_18:00:00;16.9;78947 +2001-07-18_19:00:00;16.5;78947 +2001-07-18_20:00:00;15.1;78947 +2001-07-18_21:00:00;14.7;78947 +2001-07-18_22:00:00;14.3;78947 +2001-07-18_23:00:00;14;78947 +2001-07-19_00:00:00;14.1;78947 +2001-07-19_01:00:00;14;78947 +2001-07-19_02:00:00;14.1;78947 +2001-07-19_03:00:00;14.2;78947 +2001-07-19_04:00:00;13.9;78947 +2001-07-19_05:00:00;13.8;78947 +2001-07-19_06:00:00;14.7;78947 +2001-07-19_07:00:00;15.7;78947 +2001-07-19_08:00:00;15.7;78947 +2001-07-19_09:00:00;17.2;78947 +2001-07-19_10:00:00;18.4;78947 +2001-07-19_11:00:00;18.3;78947 +2001-07-19_12:00:00;16.1;78947 +2001-07-19_13:00:00;15.5;78947 +2001-07-19_14:00:00;16;78947 +2001-07-19_15:00:00;16.5;78947 +2001-07-19_16:00:00;15.3;78947 +2001-07-19_17:00:00;15.4;78947 +2001-07-19_18:00:00;15;78947 +2001-07-19_19:00:00;14.4;78947 +2001-07-19_20:00:00;14.2;78947 +2001-07-19_21:00:00;14.1;78947 +2001-07-19_22:00:00;14;78947 +2001-07-19_23:00:00;13.6;78947 +2001-07-20_00:00:00;13.8;78947 +2001-07-20_01:00:00;13.8;78947 +2001-07-20_02:00:00;13.6;78947 +2001-07-20_03:00:00;13.7;78947 +2001-07-20_04:00:00;13.6;78947 +2001-07-20_05:00:00;14;78947 +2001-07-20_06:00:00;15.1;78947 +2001-07-20_07:00:00;15.6;78947 +2001-07-20_08:00:00;15.4;78947 +2001-07-20_09:00:00;16;78947 +2001-07-20_10:00:00;16.6;78947 +2001-07-20_11:00:00;17.1;78947 +2001-07-20_12:00:00;17.3;78947 +2001-07-20_13:00:00;17;78947 +2001-07-20_14:00:00;16.5;78947 +2001-07-20_15:00:00;16.4;78947 +2001-07-20_16:00:00;15.7;78947 +2001-07-20_17:00:00;14.9;78947 +2001-07-20_18:00:00;14.4;78947 +2001-07-20_19:00:00;14.1;78947 +2001-07-20_20:00:00;13.8;78947 +2001-07-20_21:00:00;13.7;78947 +2001-07-20_22:00:00;13.5;78947 +2001-07-20_23:00:00;13.4;78947 +2001-07-21_00:00:00;13.4;78947 +2001-07-21_01:00:00;13.4;78947 +2001-07-21_02:00:00;13.4;78947 +2001-07-21_03:00:00;13.3;78947 +2001-07-21_04:00:00;13.2;78947 +2001-07-21_05:00:00;13.2;78947 +2001-07-21_06:00:00;13.2;78947 +2001-07-21_07:00:00;13.4;78947 +2001-07-21_08:00:00;14;78947 +2001-07-21_09:00:00;14.6;78947 +2001-07-21_10:00:00;15.2;78947 +2001-07-21_11:00:00;15.4;78947 +2001-07-21_12:00:00;16.5;78947 +2001-07-21_13:00:00;16.2;78947 +2001-07-21_14:00:00;15.8;78947 +2001-07-21_15:00:00;15.4;78947 +2001-07-21_16:00:00;15.1;78947 +2001-07-21_17:00:00;14.7;78947 +2001-07-21_18:00:00;13.9;78947 +2001-07-21_19:00:00;13.4;78947 +2001-07-21_20:00:00;13;78947 +2001-07-21_21:00:00;12.8;78947 +2001-07-21_22:00:00;12.8;78947 +2001-07-21_23:00:00;12.9;78947 +2001-07-22_00:00:00;13;78947 +2001-07-22_01:00:00;13.1;78947 +2001-07-22_02:00:00;13.2;78947 +2001-07-22_03:00:00;13.2;78947 +2001-07-22_04:00:00;13.3;78947 +2001-07-22_05:00:00;13.5;78947 +2001-07-22_06:00:00;14;78947 +2001-07-22_07:00:00;14.7;78947 +2001-07-22_08:00:00;15.5;78947 +2001-07-22_09:00:00;15.8;78947 +2001-07-22_10:00:00;16.7;78947 +2001-07-22_11:00:00;17;78947 +2001-07-22_12:00:00;16.5;78947 +2001-07-22_13:00:00;17.4;78947 +2001-07-22_14:00:00;17.3;78947 +2001-07-22_15:00:00;17.5;78947 +2001-07-22_16:00:00;17;78947 +2001-07-22_17:00:00;16.7;78947 +2001-07-22_18:00:00;15.8;78947 +2001-07-22_19:00:00;15.4;78947 +2001-07-22_20:00:00;15.5;78947 +2001-07-22_21:00:00;15.2;78947 +2001-07-22_22:00:00;15.2;78947 +2001-07-22_23:00:00;15.1;78947 +2001-07-23_00:00:00;14.9;78947 +2001-07-23_01:00:00;14.8;78947 +2001-07-23_02:00:00;14.8;78947 +2001-07-23_03:00:00;14.6;78947 +2001-07-23_04:00:00;14.5;78947 +2001-07-23_05:00:00;14.7;78947 +2001-07-23_06:00:00;15.1;78947 +2001-07-23_07:00:00;15.5;78947 
+2001-07-23_08:00:00;15.7;78947 +2001-07-23_09:00:00;16.6;78947 +2001-07-23_10:00:00;18.7;78947 +2001-07-23_11:00:00;19.6;78947 +2001-07-23_12:00:00;16.8;78947 +2001-07-23_13:00:00;17.3;78947 +2001-07-23_14:00:00;17.6;78947 +2001-07-23_15:00:00;19.6;78947 +2001-07-23_16:00:00;17.4;78947 +2001-07-23_17:00:00;17.5;78947 +2001-07-23_18:00:00;16.7;78947 +2001-07-23_19:00:00;16.1;78947 +2001-07-23_20:00:00;15.4;78947 +2001-07-23_21:00:00;15.2;78947 +2001-07-23_22:00:00;14.9;78947 +2001-07-23_23:00:00;15.2;78947 +2001-07-24_00:00:00;15.4;78947 +2001-07-24_01:00:00;15;78947 +2001-07-24_02:00:00;14.1;78947 +2001-07-24_03:00:00;14.1;78947 +2001-07-24_04:00:00;14.5;78947 +2001-07-24_05:00:00;15;78947 +2001-07-24_06:00:00;15.6;78947 +2001-07-24_07:00:00;16.1;78947 +2001-07-24_08:00:00;17.4;78947 +2001-07-24_09:00:00;18.6;78947 +2001-07-24_10:00:00;19.9;78947 +2001-07-24_11:00:00;19.9;78947 +2001-07-24_12:00:00;18.2;78947 +2001-07-24_13:00:00;17.6;78947 +2001-07-24_14:00:00;17.9;78947 +2001-07-24_15:00:00;18.4;78947 +2001-07-24_16:00:00;17.9;78947 +2001-07-24_17:00:00;17.6;78947 +2001-07-24_18:00:00;17.3;78947 +2001-07-24_19:00:00;16.4;78947 +2001-07-24_20:00:00;15.4;78947 +2001-07-24_21:00:00;15.1;78947 +2001-07-24_22:00:00;15.2;78947 +2001-07-24_23:00:00;15.1;78947 +2001-07-25_00:00:00;15.1;78947 +2001-07-25_01:00:00;15;78947 +2001-07-25_02:00:00;14.9;78947 +2001-07-25_03:00:00;15;78947 +2001-07-25_04:00:00;14.9;78947 +2001-07-25_05:00:00;15.2;78947 +2001-07-25_06:00:00;15.7;78947 +2001-07-25_07:00:00;16.3;78947 +2001-07-25_09:00:00;17.7;78947 +2001-07-25_10:00:00;18.6;78947 +2001-07-25_11:00:00;19.1;78947 +2001-07-25_12:00:00;18.1;78947 +2001-07-25_13:00:00;18.7;78947 +2001-07-25_14:00:00;18.8;78947 +2001-07-25_15:00:00;18.9;78947 +2001-07-25_16:00:00;18.7;78947 +2001-07-25_17:00:00;17.8;78947 +2001-07-25_18:00:00;16.9;78947 +2001-07-25_19:00:00;16.4;78947 +2001-07-25_20:00:00;16;78947 +2001-07-25_21:00:00;15.7;78947 +2001-07-25_22:00:00;15.4;78947 +2001-07-25_23:00:00;15.1;78947 +2001-07-26_00:00:00;14.7;78947 +2001-07-26_01:00:00;14.7;78947 +2001-07-26_02:00:00;14.6;78947 +2001-07-26_03:00:00;14.6;78947 +2001-07-26_04:00:00;14.7;78947 +2001-07-26_05:00:00;14.7;78947 +2001-07-26_06:00:00;14.6;78947 +2001-07-26_07:00:00;14.7;78947 +2001-07-26_09:00:00;15.2;78947 +2001-07-26_10:00:00;15.7;78947 +2001-07-26_11:00:00;15.8;78947 +2001-07-26_12:00:00;14.8;78947 +2001-07-26_13:00:00;14.9;78947 +2001-07-26_14:00:00;15.4;78947 +2001-07-26_15:00:00;15.7;78947 +2001-07-26_16:00:00;15.5;78947 +2001-07-26_17:00:00;15.3;78947 +2001-07-26_18:00:00;15.2;78947 +2001-07-26_19:00:00;14.6;78947 +2001-07-26_20:00:00;13.9;78947 +2001-07-26_21:00:00;13.3;78947 +2001-07-26_22:00:00;13.2;78947 +2001-07-26_23:00:00;13.2;78947 +2001-07-27_01:00:00;13.4;78947 +2001-07-27_02:00:00;13.4;78947 +2001-07-27_03:00:00;13.2;78947 +2001-07-27_04:00:00;13;78947 +2001-07-27_05:00:00;13.1;78947 +2001-07-27_06:00:00;13.8;78947 +2001-07-27_07:00:00;14.4;78947 +2001-07-27_08:00:00;15.1;78947 +2001-07-27_09:00:00;16;78947 +2001-07-27_10:00:00;16.7;78947 +2001-07-27_11:00:00;16.7;78947 +2001-07-27_12:00:00;16.8;78947 +2001-07-27_13:00:00;16.5;78947 +2001-07-27_14:00:00;16.1;78947 +2001-07-27_15:00:00;15.6;78947 +2001-07-27_16:00:00;15;78947 +2001-07-27_17:00:00;14.5;78947 +2001-07-27_18:00:00;14.1;78947 +2001-07-27_19:00:00;13.5;78947 +2001-07-27_20:00:00;12.9;78947 +2001-07-27_21:00:00;12.6;78947 +2001-07-27_22:00:00;12.5;78947 +2001-07-27_23:00:00;12.4;78947 +2001-07-28_00:00:00;12.6;78947 +2001-07-28_01:00:00;12.8;78947 
+2001-07-28_02:00:00;12.9;78947 +2001-07-28_03:00:00;12.9;78947 +2001-07-28_04:00:00;13;78947 +2001-07-28_05:00:00;13.2;78947 +2001-07-28_06:00:00;13.4;78947 +2001-07-28_07:00:00;13.7;78947 +2001-07-28_09:00:00;14.1;78947 +2001-07-28_10:00:00;14.4;78947 +2001-07-28_11:00:00;14.7;78947 +2001-07-28_12:00:00;15.4;78947 +2001-07-28_13:00:00;15.3;78947 +2001-07-28_14:00:00;14.8;78947 +2001-07-28_15:00:00;14.5;78947 +2001-07-28_16:00:00;14.3;78947 +2001-07-28_17:00:00;14.1;78947 +2001-07-28_18:00:00;13.4;78947 +2001-07-28_19:00:00;12.8;78947 +2001-07-28_20:00:00;12.4;78947 +2001-07-28_21:00:00;12.3;78947 +2001-07-28_22:00:00;12.5;78947 +2001-07-28_23:00:00;12.7;78947 +2001-07-29_00:00:00;12.4;78947 +2001-07-29_01:00:00;12.3;78947 +2001-07-29_02:00:00;12.1;78947 +2001-07-29_03:00:00;12;78947 +2001-07-29_04:00:00;12.1;78947 +2001-07-29_05:00:00;12.3;78947 +2001-07-29_06:00:00;12.9;78947 +2001-07-29_07:00:00;13.5;78947 +2001-07-29_09:00:00;14.6;78947 +2001-07-29_10:00:00;15;78947 +2001-07-29_11:00:00;15.2;78947 +2001-07-29_12:00:00;15.5;78947 +2001-07-29_13:00:00;15.4;78947 +2001-07-29_14:00:00;15.2;78947 +2001-07-29_15:00:00;14.8;78947 +2001-07-29_16:00:00;14.4;78947 +2001-07-29_17:00:00;14;78947 +2001-07-29_18:00:00;13.6;78947 +2001-07-29_19:00:00;13.1;78947 +2001-07-29_20:00:00;12.7;78947 +2001-07-29_21:00:00;12.5;78947 +2001-07-29_22:00:00;12.4;78947 +2001-07-29_23:00:00;12.3;78947 +2001-07-30_00:00:00;12.3;78947 +2001-07-30_01:00:00;12.2;78947 +2001-07-30_02:00:00;12.3;78947 +2001-07-30_03:00:00;12.3;78947 +2001-07-30_05:00:00;12.6;78947 +2001-07-30_06:00:00;13.4;78947 +2001-07-30_07:00:00;14;78947 +2001-07-30_08:00:00;14.8;78947 +2001-07-30_09:00:00;15.4;78947 +2001-07-30_10:00:00;15.8;78947 +2001-07-30_11:00:00;16.1;78947 +2001-07-30_12:00:00;16.6;78947 +2001-07-30_13:00:00;16.5;78947 +2001-07-30_14:00:00;16.3;78947 +2001-07-30_15:00:00;16;78947 +2001-07-30_16:00:00;15.6;78947 +2001-07-30_17:00:00;15;78947 +2001-07-30_18:00:00;14.4;78947 +2001-07-30_19:00:00;13.9;78947 +2001-07-30_20:00:00;13.3;78947 +2001-07-30_21:00:00;12.8;78947 +2001-07-30_22:00:00;12.5;78947 +2001-07-30_23:00:00;12.6;78947 +2001-07-31_00:00:00;13;78947 +2001-07-31_01:00:00;13.1;78947 +2001-07-31_02:00:00;13.3;78947 +2001-07-31_03:00:00;13.3;78947 +2001-07-31_04:00:00;13.2;78947 +2001-07-31_05:00:00;13.5;78947 +2001-07-31_06:00:00;14.2;78947 +2001-07-31_07:00:00;14.6;78947 +2001-07-31_08:00:00;15;78947 +2001-07-31_09:00:00;15.8;78947 +2001-07-31_10:00:00;16.4;78947 +2001-07-31_11:00:00;16.8;78947 +2001-07-31_12:00:00;17.1;78947 +2001-07-31_13:00:00;17.1;78947 +2001-07-31_14:00:00;16.6;78947 +2001-07-31_15:00:00;16.2;78947 +2001-07-31_16:00:00;15.8;78947 +2001-07-31_17:00:00;15.4;78947 +2001-07-31_18:00:00;14.8;78947 +2001-07-31_19:00:00;14;78947 +2001-07-31_20:00:00;13.2;78947 +2001-07-31_21:00:00;12.6;78947 +2001-07-31_22:00:00;12;78947 +2001-07-31_23:00:00;12.3;78947 +2001-08-01_00:00:00;13.1;78947 +2001-08-01_01:00:00;13.2;78947 +2001-08-01_02:00:00;13.3;78947 +2001-08-01_03:00:00;13.3;78947 +2001-08-01_04:00:00;13.3;78947 +2001-08-01_05:00:00;13.6;78947 +2001-08-01_06:00:00;14.4;78947 +2001-08-01_07:00:00;14.9;78947 +2001-08-01_09:00:00;16.2;78947 +2001-08-01_10:00:00;16.6;78947 +2001-08-01_11:00:00;17;78947 +2001-08-01_12:00:00;17.2;78947 +2001-08-01_13:00:00;17.4;78947 +2001-08-01_14:00:00;17.4;78947 +2001-08-01_15:00:00;17.1;78947 +2001-08-01_16:00:00;16.7;78947 +2001-08-01_17:00:00;16.2;78947 +2001-08-01_18:00:00;15.4;78947 +2001-08-01_19:00:00;14.6;78947 +2001-08-01_20:00:00;13.7;78947 
+2001-08-01_21:00:00;13;78947 +2001-08-01_22:00:00;12.5;78947 +2001-08-01_23:00:00;12.3;78947 +2001-08-02_00:00:00;12.6;78947 +2001-08-02_01:00:00;12.6;78947 +2001-08-02_02:00:00;12.6;78947 +2001-08-02_03:00:00;12.8;78947 +2001-08-02_04:00:00;13.2;78947 +2001-08-02_05:00:00;14;78947 +2001-08-02_06:00:00;15.6;78947 +2001-08-02_07:00:00;16.1;78947 +2001-08-02_09:00:00;17;78947 +2001-08-02_10:00:00;17;78947 +2001-08-02_11:00:00;17.1;78947 +2001-08-02_12:00:00;17.2;78947 +2001-08-02_13:00:00;18.2;78947 +2001-08-02_14:00:00;19.2;78947 +2001-08-02_15:00:00;19.4;78947 +2001-08-02_16:00:00;19.8;78947 +2001-08-02_17:00:00;20;78947 +2001-08-02_18:00:00;18.4;78947 +2001-08-02_19:00:00;17.8;78947 +2001-08-02_20:00:00;16.6;78947 +2001-08-02_21:00:00;15.6;78947 +2001-08-02_22:00:00;15.6;78947 +2001-08-02_23:00:00;15.7;78947 +2001-08-03_00:00:00;15.6;78947 +2001-08-03_01:00:00;16;78947 +2001-08-03_02:00:00;16.5;78947 +2001-08-03_03:00:00;16.7;78947 +2001-08-03_04:00:00;16.3;78947 +2001-08-03_05:00:00;16.5;78947 +2001-08-03_06:00:00;17.5;78947 +2001-08-03_07:00:00;17;78947 +2001-08-03_09:00:00;17.4;78947 +2001-08-03_10:00:00;18.7;78947 +2001-08-03_11:00:00;20.4;78947 +2001-08-03_12:00:00;21.9;78947 +2001-08-03_13:00:00;21.3;78947 +2001-08-03_14:00:00;21.9;78947 +2001-08-03_15:00:00;21.7;78947 +2001-08-03_16:00:00;19.8;78947 +2001-08-03_17:00:00;17.8;78947 +2001-08-03_18:00:00;19.1;78947 +2001-08-03_19:00:00;18.6;78947 +2001-08-03_20:00:00;18;78947 +2001-08-03_21:00:00;17.7;78947 +2001-08-03_22:00:00;17.7;78947 +2001-08-03_23:00:00;17.8;78947 +2001-08-04_00:00:00;17;78947 +2001-08-04_01:00:00;16.8;78947 +2001-08-04_02:00:00;16.9;78947 +2001-08-04_03:00:00;16.9;78947 +2001-08-04_04:00:00;16.9;78947 +2001-08-04_05:00:00;16.6;78947 +2001-08-04_06:00:00;17.5;78947 +2001-08-04_07:00:00;19;78947 +2001-08-04_09:00:00;20.2;78947 +2001-08-04_10:00:00;19.4;78947 +2001-08-04_11:00:00;20;78947 +2001-08-04_12:00:00;20.3;78947 +2001-08-04_13:00:00;21.3;78947 +2001-08-04_14:00:00;22.2;78947 +2001-08-04_15:00:00;22.1;78947 +2001-08-04_16:00:00;22.5;78947 +2001-08-04_17:00:00;22;78947 +2001-08-04_18:00:00;18.6;78947 +2001-08-04_19:00:00;17.1;78947 +2001-08-04_20:00:00;16.5;78947 +2001-08-04_21:00:00;16.2;78947 +2001-08-04_22:00:00;16.2;78947 +2001-08-04_23:00:00;16.7;78947 +2001-08-05_00:00:00;16.4;78947 +2001-08-05_01:00:00;16.7;78947 +2001-08-05_02:00:00;16.5;78947 +2001-08-05_03:00:00;16.4;78947 +2001-08-05_04:00:00;16.6;78947 +2001-08-05_05:00:00;16.5;78947 +2001-08-05_06:00:00;16.1;78947 +2001-08-05_07:00:00;16.3;78947 +2001-08-05_09:00:00;16.5;78947 +2001-08-05_10:00:00;16.7;78947 +2001-08-05_11:00:00;16.8;78947 +2001-08-05_12:00:00;17;78947 +2001-08-05_13:00:00;16.9;78947 +2001-08-05_14:00:00;16.6;78947 +2001-08-05_15:00:00;16.3;78947 +2001-08-05_16:00:00;16.1;78947 +2001-08-05_17:00:00;15.9;78947 +2001-08-05_18:00:00;16;78947 +2001-08-05_19:00:00;15.8;78947 +2001-08-05_20:00:00;15.6;78947 +2001-08-05_21:00:00;15.6;78947 +2001-08-05_22:00:00;15.5;78947 +2001-08-05_23:00:00;15.4;78947 +2001-08-06_00:00:00;15;78947 +2001-08-06_01:00:00;14.8;78947 +2001-08-06_02:00:00;14.6;78947 +2001-08-06_03:00:00;14.5;78947 +2001-08-06_04:00:00;14.5;78947 +2001-08-06_05:00:00;14.5;78947 +2001-08-06_06:00:00;14.7;78947 +2001-08-06_08:00:00;14.8;78947 +2001-08-06_09:00:00;15.7;78947 +2001-08-06_10:00:00;16.1;78947 +2001-08-06_11:00:00;15.8;78947 +2001-08-06_12:00:00;16.5;78947 +2001-08-06_13:00:00;15.8;78947 +2001-08-06_14:00:00;16.5;78947 +2001-08-06_15:00:00;16.2;78947 +2001-08-06_16:00:00;16.1;78947 +2001-08-06_17:00:00;15.7;78947 
+2001-08-06_18:00:00;14.8;78947 +2001-08-06_19:00:00;14.2;78947 +2001-08-06_20:00:00;13.1;78947 +2001-08-06_21:00:00;12.3;78947 +2001-08-06_22:00:00;11.7;78947 +2001-08-06_23:00:00;11.3;78947 +2001-08-07_00:00:00;11.2;78947 +2001-08-07_01:00:00;11.5;78947 +2001-08-07_02:00:00;11.6;78947 +2001-08-07_03:00:00;11.8;78947 +2001-08-07_04:00:00;11.7;78947 +2001-08-07_05:00:00;12.3;78947 +2001-08-07_06:00:00;13.1;78947 +2001-08-07_07:00:00;15;78947 +2001-08-07_08:00:00;14.6;78947 +2001-08-07_09:00:00;13.8;78947 +2001-08-07_10:00:00;14.3;78947 +2001-08-07_11:00:00;15.5;78947 +2001-08-07_12:00:00;15.6;78947 +2001-08-07_13:00:00;15.5;78947 +2001-08-07_14:00:00;16.2;78947 +2001-08-07_15:00:00;16.5;78947 +2001-08-07_16:00:00;16;78947 +2001-08-07_17:00:00;15.9;78947 +2001-08-07_18:00:00;14.8;78947 +2001-08-07_19:00:00;14.5;78947 +2001-08-07_20:00:00;14.2;78947 +2001-08-07_21:00:00;14.5;78947 +2001-08-07_22:00:00;14.6;78947 +2001-08-07_23:00:00;13.5;78947 +2001-08-08_00:00:00;13.2;78947 +2001-08-08_01:00:00;13.1;78947 +2001-08-08_02:00:00;13.4;78947 +2001-08-08_03:00:00;13.2;78947 +2001-08-08_04:00:00;12.9;78947 +2001-08-08_05:00:00;12.7;78947 +2001-08-08_06:00:00;14.1;78947 +2001-08-08_07:00:00;14.9;78947 +2001-08-08_08:00:00;14.7;78947 +2001-08-08_09:00:00;15.5;78947 +2001-08-08_10:00:00;15;78947 +2001-08-08_11:00:00;16.7;78947 +2001-08-08_12:00:00;14.6;70000 +2001-08-08_13:00:00;14;70000 +2001-08-08_14:00:00;13.8;58927 +2001-08-08_15:00:00;13.6;70000 +2001-08-08_16:00:00;14.1;58927 +2001-08-08_17:00:00;14.5;70000 +2001-08-08_18:00:00;14.7;58927 +2001-08-08_19:00:00;14.9;70000 +2001-08-08_20:00:00;13.7;78947 +2001-08-08_21:00:00;13.8;78947 +2001-08-08_22:00:00;15.8;70000 +2001-08-08_23:00:00;15.9;70000 +2001-08-09_00:00:00;12.5;78947 +2001-08-09_01:00:00;13.1;78947 +2001-08-09_02:00:00;13.1;78947 +2001-08-09_03:00:00;13.2;78947 +2001-08-09_04:00:00;13.5;78947 +2001-08-09_05:00:00;13.7;78947 +2001-08-09_06:00:00;13.8;78947 +2001-08-09_07:00:00;14.7;70000 +2001-08-09_08:00:00;14.7;58927 +2001-08-09_09:00:00;14.7;70000 +2001-08-09_10:00:00;14.6;70000 +2001-08-09_11:00:00;14.6;70000 +2001-08-09_12:00:00;14.4;70000 +2001-08-09_13:00:00;14.2;70000 +2001-08-09_14:00:00;14.4;70000 +2001-08-09_15:00:00;14.3;70000 +2001-08-09_16:00:00;14.2;70000 +2001-08-09_17:00:00;14;70000 +2001-08-09_18:00:00;13.9;70000 +2001-08-09_19:00:00;12.9;70000 +2001-08-09_20:00:00;13.2;70000 +2001-08-09_21:00:00;13.1;70000 +2001-08-09_22:00:00;13.1;70000 +2001-08-09_23:00:00;13.1;70000 +2001-08-10_00:00:00;12.8;70000 +2001-08-10_01:00:00;11.6;70000 +2001-08-10_02:00:00;12.2;70000 +2001-08-10_03:00:00;12.7;70000 +2001-08-10_04:00:00;12.5;70000 +2001-08-10_05:00:00;12.4;70000 +2001-08-10_06:00:00;12.5;70000 +2001-08-10_07:00:00;12.6;70000 +2001-08-10_08:00:00;12.6;70000 +2001-08-10_09:00:00;11.8;70000 +2001-08-10_10:00:00;12.1;70000 +2001-08-10_11:00:00;12.1;70000 +2001-08-10_12:00:00;12.7;70000 +2001-08-10_13:00:00;13;70000 +2001-08-10_14:00:00;13.1;70000 +2001-08-10_15:00:00;13.3;70000 +2001-08-10_16:00:00;13.4;70000 +2001-08-10_17:00:00;13.5;70000 +2001-08-10_18:00:00;13.5;70000 +2001-08-10_19:00:00;13.6;70000 +2001-08-10_20:00:00;13.8;70000 +2001-08-10_21:00:00;13.9;70000 +2001-08-10_22:00:00;13.9;70000 +2001-08-10_23:00:00;13.7;58927 +2001-08-11_00:00:00;13.5;70000 +2001-08-11_01:00:00;13.4;70000 +2001-08-11_02:00:00;13.1;70000 +2001-08-11_03:00:00;12.5;70000 +2001-08-11_04:00:00;12.7;70000 +2001-08-11_05:00:00;13.2;70000 +2001-08-11_06:00:00;12.9;70000 +2001-08-11_07:00:00;14.3;70000 +2001-08-11_08:00:00;15;70000 
+2001-08-11_09:00:00;15.8;70000 +2001-08-11_10:00:00;15.8;58927 +2001-08-11_11:00:00;15.7;70000 +2001-08-11_12:00:00;15.9;70000 +2001-08-11_13:00:00;16;70000 +2001-08-11_14:00:00;16.1;70000 +2001-08-11_15:00:00;16.2;70000 +2001-08-11_16:00:00;16;70000 +2001-08-11_17:00:00;16.1;70000 +2001-08-11_18:00:00;16.1;70000 +2001-08-11_19:00:00;16;70000 +2001-08-11_20:00:00;15.9;70000 +2001-08-11_21:00:00;15.7;70000 +2001-08-11_22:00:00;15.6;70000 +2001-08-11_23:00:00;15.6;70000 +2001-08-12_00:00:00;15.6;70000 +2001-08-12_01:00:00;15.3;70000 +2001-08-12_02:00:00;15.2;70000 +2001-08-12_03:00:00;15.1;70000 +2001-08-12_04:00:00;15;70000 +2001-08-12_05:00:00;14.7;70000 +2001-08-12_06:00:00;14.7;70000 +2001-08-12_07:00:00;14.9;70000 +2001-08-12_08:00:00;14.9;70000 +2001-08-12_09:00:00;14.9;70000 +2001-08-12_10:00:00;15.3;70000 +2001-08-12_11:00:00;15.6;70000 +2001-08-12_12:00:00;15.8;70000 +2001-08-12_13:00:00;15.6;70000 +2001-08-12_14:00:00;15.9;70000 +2001-08-12_15:00:00;16.2;70000 +2001-08-12_16:00:00;16.2;70000 +2001-08-12_17:00:00;16.3;70000 +2001-08-12_18:00:00;16.9;70000 +2001-08-12_19:00:00;16.6;70000 +2001-08-12_20:00:00;15.7;70000 +2001-08-12_21:00:00;16.2;70000 +2001-08-12_22:00:00;16.5;70000 +2001-08-12_23:00:00;16.3;70000 +2001-08-13_00:00:00;16.1;70000 +2001-08-13_01:00:00;16;70000 +2001-08-13_02:00:00;15.9;70000 +2001-08-13_03:00:00;15.7;70000 +2001-08-13_04:00:00;15.8;70000 +2001-08-13_05:00:00;15.5;70000 +2001-08-13_06:00:00;15.3;70000 +2001-08-13_07:00:00;15.6;70000 +2001-08-13_08:00:00;16.2;70000 +2001-08-13_09:00:00;16.6;70000 +2001-08-13_10:00:00;16.9;70000 +2001-08-13_11:00:00;17.2;70000 +2001-08-13_12:00:00;17.5;70000 +2001-08-13_13:00:00;17.4;70000 +2001-08-13_14:00:00;17.6;70000 +2001-08-13_15:00:00;17.2;70000 +2001-08-13_16:00:00;17.1;70000 +2001-08-13_17:00:00;17.2;70000 +2001-08-13_18:00:00;17.2;70000 +2001-08-13_19:00:00;17.9;70000 +2001-08-13_20:00:00;17.5;70000 +2001-08-13_21:00:00;17.3;70000 +2001-08-13_22:00:00;17.2;70000 +2001-08-13_23:00:00;17.1;70000 +2001-08-14_00:00:00;17.1;70000 +2001-08-14_01:00:00;17.2;70000 +2001-08-14_02:00:00;16.9;70000 +2001-08-14_03:00:00;17.2;70000 +2001-08-14_04:00:00;17;70000 +2001-08-14_05:00:00;16.9;70000 +2001-08-14_06:00:00;16.8;70000 +2001-08-14_07:00:00;17;70000 +2001-08-14_08:00:00;17.3;70000 +2001-08-14_09:00:00;17;70000 +2001-08-14_10:00:00;17.1;70000 +2001-08-14_11:00:00;17.3;70000 +2001-08-14_12:00:00;17.1;70000 +2001-08-14_13:00:00;17;70000 +2001-08-14_14:00:00;17.1;70000 +2001-08-14_15:00:00;16.9;70000 +2001-08-14_16:00:00;16.7;70000 +2001-08-14_17:00:00;16.5;70000 +2001-08-14_18:00:00;16.3;70000 +2001-08-14_19:00:00;16.2;70000 +2001-08-14_20:00:00;16.2;70000 +2001-08-14_21:00:00;16.2;70000 +2001-08-14_22:00:00;15.9;70000 +2001-08-14_23:00:00;16;70000 +2001-08-15_00:00:00;15.4;70000 +2001-08-15_01:00:00;15.3;70000 +2001-08-15_02:00:00;15.3;70000 +2001-08-15_03:00:00;15.3;70000 +2001-08-15_04:00:00;14.9;70000 +2001-08-15_05:00:00;15;70000 +2001-08-15_06:00:00;15;70000 +2001-08-15_07:00:00;15.2;70000 +2001-08-15_08:00:00;15.1;70000 +2001-08-15_09:00:00;15.4;70000 +2001-08-15_10:00:00;15.3;70000 +2001-08-15_11:00:00;15.3;70000 +2001-08-15_12:00:00;15.6;70000 +2001-08-15_13:00:00;15.6;70000 +2001-08-15_14:00:00;15.5;70000 +2001-08-15_15:00:00;15.5;70000 +2001-08-15_16:00:00;15.5;70000 +2001-08-15_17:00:00;15.5;70000 +2001-08-15_18:00:00;15.3;70000 +2001-08-15_19:00:00;15.3;70000 +2001-08-15_20:00:00;15.2;70000 +2001-08-15_21:00:00;15.2;70000 +2001-08-15_22:00:00;15.1;70000 +2001-08-15_23:00:00;15;70000 
+2001-08-16_00:00:00;15;70000 +2001-08-16_01:00:00;14.8;70000 +2001-08-16_02:00:00;14.8;70000 +2001-08-16_03:00:00;14.8;70000 +2001-08-16_04:00:00;14.6;70000 +2001-08-16_05:00:00;14.6;70000 +2001-08-16_06:00:00;14.5;70000 +2001-08-16_07:00:00;14.6;70000 +2001-08-16_08:00:00;14.5;70000 +2001-08-16_09:00:00;14.5;70000 +2001-08-16_10:00:00;14.8;70000 +2001-08-16_11:00:00;14.9;70000 +2001-08-16_12:00:00;15;70000 +2001-08-16_13:00:00;15.1;70000 +2001-08-16_14:00:00;15.2;70000 +2001-08-16_15:00:00;15.3;70000 +2001-08-16_16:00:00;15.4;70000 +2001-08-16_17:00:00;15.3;70000 +2001-08-16_18:00:00;15.4;70000 +2001-08-16_19:00:00;15.4;70000 +2001-08-16_20:00:00;15.5;70000 +2001-08-16_21:00:00;15.6;70000 +2001-08-16_22:00:00;15.7;70000 +2001-08-16_23:00:00;15.3;70000 +2001-08-17_00:00:00;15.2;70000 +2001-08-17_01:00:00;14.9;70000 +2001-08-17_02:00:00;14.6;70000 +2001-08-17_03:00:00;14.6;70000 +2001-08-17_04:00:00;14.3;70000 +2001-08-17_05:00:00;14.1;70000 +2001-08-17_06:00:00;14.3;70000 +2001-08-17_07:00:00;14.5;70000 +2001-08-17_08:00:00;14.8;70000 +2001-08-17_09:00:00;15.1;70000 +2001-08-17_10:00:00;15.4;70000 +2001-08-17_11:00:00;15.8;70000 +2001-08-17_12:00:00;16;70000 +2001-08-17_13:00:00;16.1;70000 +2001-08-17_14:00:00;16.3;70000 +2001-08-17_15:00:00;16.5;70000 +2001-08-17_16:00:00;16.7;70000 +2001-08-17_17:00:00;16.5;70000 +2001-08-17_18:00:00;16.5;70000 +2001-08-17_19:00:00;16.3;70000 +2001-08-17_20:00:00;15.9;70000 +2001-08-17_21:00:00;15.9;70000 +2001-08-17_22:00:00;15.8;70000 +2001-08-17_23:00:00;15.6;70000 +2001-08-18_00:00:00;15.5;70000 +2001-08-18_01:00:00;15.4;70000 +2001-08-18_02:00:00;15.3;70000 +2001-08-18_03:00:00;15.2;70000 +2001-08-18_04:00:00;15.2;70000 +2001-08-18_05:00:00;15;70000 +2001-08-18_06:00:00;15;70000 +2001-08-18_07:00:00;15;70000 +2001-08-18_08:00:00;15.2;70000 +2001-08-18_09:00:00;15.2;70000 +2001-08-18_10:00:00;15.2;70000 +2001-08-18_11:00:00;15.2;70000 +2001-08-18_12:00:00;15.2;70000 +2001-08-18_13:00:00;15.2;70203 +2001-08-18_14:00:00;15.3;70000 +2001-08-18_15:00:00;15.3;70000 +2001-08-18_16:00:00;15.3;70000 +2001-08-18_17:00:00;15.3;70000 +2001-08-18_18:00:00;15.2;70000 +2001-08-18_19:00:00;15.2;70000 +2001-08-18_20:00:00;15.2;70000 +2001-08-18_21:00:00;15.2;70000 +2001-08-18_22:00:00;15.2;70000 +2001-08-18_23:00:00;15.2;70203 +2001-08-19_00:00:00;15.2;70203 +2001-08-19_01:00:00;15.2;70203 +2001-08-19_02:00:00;15.2;70203 +2001-08-19_03:00:00;15;70000 +2001-08-19_04:00:00;15;70000 +2001-08-19_05:00:00;15;70000 +2001-08-19_06:00:00;14.9;70000 +2001-08-19_07:00:00;15.1;70000 +2001-08-19_08:00:00;15;70000 +2001-08-19_09:00:00;15;70000 +2001-08-19_10:00:00;15.1;70000 +2001-08-19_11:00:00;15;70000 +2001-08-19_12:00:00;15.1;70000 +2001-08-19_13:00:00;15.1;70000 +2001-08-19_14:00:00;15.1;70000 +2001-08-19_15:00:00;15;70000 +2001-08-19_16:00:00;15;70000 +2001-08-19_17:00:00;15;70000 +2001-08-19_18:00:00;14.9;70000 +2001-08-19_19:00:00;14.8;70000 +2001-08-19_20:00:00;14.8;70000 +2001-08-19_21:00:00;14.6;70000 +2001-08-19_22:00:00;14.6;70000 +2001-08-19_23:00:00;14.5;70000 +2001-08-20_00:00:00;14.8;70000 +2001-08-20_01:00:00;14.6;70000 +2001-08-20_02:00:00;14.8;70000 +2001-08-20_03:00:00;14.1;70000 +2001-08-20_04:00:00;14.9;70000 +2001-08-20_05:00:00;15.2;70000 +2001-08-20_06:00:00;15.2;70000 +2001-08-20_07:00:00;13.9;70000 +2001-08-20_08:00:00;13.5;70000 +2001-08-20_09:00:00;14.6;70000 +2001-08-20_10:00:00;15.8;70000 +2001-08-20_11:00:00;15.8;70000 +2001-08-20_12:00:00;15.9;70000 +2001-08-20_13:00:00;15.8;70000 +2001-08-20_14:00:00;15.8;70000 +2001-08-20_15:00:00;15.9;70000 
+2001-08-20_16:00:00;16;70000 +2001-08-20_17:00:00;16;70000 +2001-08-20_18:00:00;15.8;70000 +2001-08-20_19:00:00;15.6;70000 +2001-08-20_20:00:00;15.5;70000 +2001-08-20_21:00:00;15.5;70000 +2001-08-20_22:00:00;15.4;70000 +2001-08-20_23:00:00;15.3;70000 +2001-08-21_00:00:00;15.4;70000 +2001-08-21_01:00:00;15.4;70000 +2001-08-21_02:00:00;15.5;70000 +2001-08-21_03:00:00;15.6;70000 +2001-08-21_04:00:00;15.8;70000 +2001-08-21_05:00:00;15.4;70000 +2001-08-21_06:00:00;14.7;70000 +2001-08-21_07:00:00;14.9;70000 +2001-08-21_08:00:00;14.9;70000 +2001-08-21_09:00:00;14.4;70000 +2001-08-21_10:00:00;14.9;70000 +2001-08-21_11:00:00;15.9;70000 +2001-08-21_12:00:00;15.8;70000 +2001-08-21_13:00:00;16.1;70000 +2001-08-21_14:00:00;16.1;70000 +2001-08-21_15:00:00;15.9;70000 +2001-08-21_16:00:00;15.8;70000 +2001-08-21_17:00:00;15.8;70000 +2001-08-21_18:00:00;15.7;70000 +2001-08-21_19:00:00;15.7;70000 +2001-08-21_20:00:00;15.6;70000 +2001-08-21_21:00:00;15.5;70000 +2001-08-21_22:00:00;15.3;70000 +2001-08-21_23:00:00;15.2;70000 +2001-08-22_00:00:00;15.2;70000 +2001-08-22_01:00:00;15.3;70000 +2001-08-22_02:00:00;15.4;70000 +2001-08-22_03:00:00;15.4;70000 +2001-08-22_04:00:00;15.2;70000 +2001-08-22_05:00:00;15.2;70000 +2001-08-22_06:00:00;15.5;70000 +2001-08-22_07:00:00;15.5;70000 +2001-08-22_08:00:00;15.5;70000 +2001-08-22_09:00:00;15.5;70000 +2001-08-22_10:00:00;15.4;70000 +2001-08-22_11:00:00;15.3;70000 +2001-08-22_12:00:00;15.2;70000 +2001-08-22_13:00:00;15.2;70000 +2001-08-22_14:00:00;15.2;70000 +2001-08-22_15:00:00;15.2;70000 +2001-08-22_16:00:00;15;70000 +2001-08-22_17:00:00;15.3;70000 +2001-08-22_18:00:00;15.2;70000 +2001-08-22_19:00:00;15.2;70000 +2001-08-22_20:00:00;15.3;70000 +2001-08-22_21:00:00;15.3;70000 +2001-08-22_22:00:00;15;70000 +2001-08-22_23:00:00;15.2;70000 +2001-08-23_00:00:00;15.2;70000 +2001-08-23_01:00:00;15;70000 +2001-08-23_02:00:00;14.9;70000 +2001-08-23_03:00:00;14.9;70000 +2001-08-23_04:00:00;14.8;70000 +2001-08-23_05:00:00;14.9;70000 +2001-08-23_06:00:00;15;70000 +2001-08-23_07:00:00;15.1;70000 +2001-08-23_08:00:00;14.8;70000 +2001-08-23_09:00:00;15.1;70000 +2001-08-23_10:00:00;15.2;70000 +2001-08-23_11:00:00;15.2;70000 +2001-08-23_12:00:00;15.5;70000 +2001-08-23_13:00:00;15.6;70000 +2001-08-23_14:00:00;15.7;70000 +2001-08-23_15:00:00;15.9;70000 +2001-08-23_16:00:00;16;70000 +2001-08-23_17:00:00;16.2;70000 +2001-08-23_18:00:00;16.3;70000 +2001-08-23_19:00:00;16.3;70000 +2001-08-23_20:00:00;16.5;70000 +2001-08-23_21:00:00;16.5;70000 +2001-08-23_22:00:00;16.9;70000 +2001-08-23_23:00:00;16.6;70000 +2001-08-24_00:00:00;16.5;70000 +2001-08-24_01:00:00;16.2;70000 +2001-08-24_02:00:00;16.6;70000 +2001-08-24_03:00:00;16;70000 +2001-08-24_04:00:00;16.6;70000 +2001-08-24_05:00:00;16.7;70000 +2001-08-24_06:00:00;15.8;70000 +2001-08-24_07:00:00;14.9;70000 +2001-08-24_08:00:00;14.6;70000 +2001-08-24_09:00:00;15.2;70000 +2001-08-24_10:00:00;15.7;70000 +2001-08-24_11:00:00;15.7;70000 +2001-08-24_12:00:00;15.8;70000 +2001-08-24_13:00:00;16.3;70000 +2001-08-24_14:00:00;17.2;70000 +2001-08-24_15:00:00;17.3;70000 +2001-08-24_16:00:00;17.4;70000 +2001-08-24_17:00:00;17.4;70000 +2001-08-24_18:00:00;15.8;70000 +2001-08-24_19:00:00;15.4;70000 +2001-08-24_20:00:00;15.3;70000 +2001-08-24_21:00:00;15;70000 +2001-08-24_22:00:00;15.5;70000 +2001-08-24_23:00:00;15.5;70000 +2001-08-25_00:00:00;15.5;70000 +2001-08-25_01:00:00;15.3;70000 +2001-08-25_02:00:00;15.3;70000 +2001-08-25_03:00:00;15.3;70000 +2001-08-25_04:00:00;15.3;70000 +2001-08-25_05:00:00;15.3;70000 +2001-08-25_06:00:00;15.3;70203 
+2001-08-25_07:00:00;15.3;70203 +2001-08-25_08:00:00;15.6;70000 +2001-08-25_09:00:00;15.7;70000 +2001-08-25_10:00:00;15.8;70000 +2001-08-25_11:00:00;15.8;70000 +2001-08-25_12:00:00;15.8;70000 +2001-08-25_13:00:00;15.9;70000 +2001-08-25_14:00:00;16.1;70000 +2001-08-25_15:00:00;16.2;70000 +2001-08-25_16:00:00;16.3;70000 +2001-08-25_17:00:00;16.2;70000 +2001-08-25_18:00:00;16.2;70000 +2001-08-25_19:00:00;16.3;70000 +2001-08-25_20:00:00;16.3;70000 +2001-08-25_21:00:00;16.3;70000 +2001-08-25_22:00:00;16.3;70000 +2001-08-25_23:00:00;16.1;70000 +2001-08-26_00:00:00;15.8;70000 +2001-08-26_01:00:00;16.4;70000 +2001-08-26_02:00:00;16.4;70000 +2001-08-26_03:00:00;16.5;70000 +2001-08-26_04:00:00;16.5;70000 +2001-08-26_05:00:00;16.2;70000 +2001-08-26_06:00:00;16.3;70000 +2001-08-26_07:00:00;16.5;70000 +2001-08-26_08:00:00;16.4;70000 +2001-08-26_09:00:00;16.6;70000 +2001-08-26_10:00:00;16.9;70000 +2001-08-26_11:00:00;17.1;70000 +2001-08-26_12:00:00;16.6;70000 +2001-08-26_13:00:00;16.2;70000 +2001-08-26_14:00:00;16.7;70000 +2001-08-26_15:00:00;17.3;70000 +2001-08-26_16:00:00;17.7;70000 +2001-08-26_17:00:00;16.4;70000 +2001-08-26_18:00:00;17.9;70000 +2001-08-26_19:00:00;18.1;70000 +2001-08-26_20:00:00;18.5;70000 +2001-08-26_21:00:00;19;70000 +2001-08-26_22:00:00;18.4;70000 +2001-08-26_23:00:00;18.4;70000 +2001-08-27_00:00:00;18.1;70000 +2001-08-27_01:00:00;17.8;70000 +2001-08-27_02:00:00;17.7;70000 +2001-08-27_03:00:00;17.6;70000 +2001-08-27_04:00:00;17.5;70000 +2001-08-27_05:00:00;17.5;70000 +2001-08-27_06:00:00;17.6;70000 +2001-08-27_07:00:00;17.1;70000 +2001-08-27_08:00:00;16.9;70000 +2001-08-27_09:00:00;16.7;70000 +2001-08-27_10:00:00;15;78947 +2001-08-27_11:00:00;15;78947 +2001-08-27_12:00:00;18.1;78947 +2001-08-27_13:00:00;18.1;78947 +2001-08-27_14:00:00;16.5;78947 +2001-08-27_15:00:00;16.9;78947 +2001-08-27_16:00:00;15.1;78947 +2001-08-27_17:00:00;15.2;78947 +2001-08-27_18:00:00;15.4;78947 +2001-08-27_19:00:00;14.9;78947 +2001-08-27_20:00:00;14.7;78947 +2001-08-27_21:00:00;14.3;78947 +2001-08-27_22:00:00;14.4;78947 +2001-08-27_23:00:00;14.1;78947 +2001-08-28_00:00:00;12.8;78947 +2001-08-28_01:00:00;12.7;78947 +2001-08-28_02:00:00;12.6;78947 +2001-08-28_03:00:00;12.4;78947 +2001-08-28_04:00:00;12.6;78947 +2001-08-28_05:00:00;11.9;78947 +2001-08-28_06:00:00;12.6;78947 +2001-08-28_07:00:00;12.7;78947 +2001-08-28_08:00:00;13.5;78947 +2001-08-28_09:00:00;12.8;78947 +2001-08-28_10:00:00;12.9;78947 +2001-08-28_11:00:00;13.2;78947 +2001-08-28_12:00:00;14.4;78947 +2001-08-28_13:00:00;13;78947 +2001-08-28_14:00:00;14.1;78947 +2001-08-28_15:00:00;13.2;78947 +2001-08-28_16:00:00;13;78947 +2001-08-28_17:00:00;12.7;78947 +2001-08-28_18:00:00;12.8;78947 +2001-08-28_19:00:00;13;78947 +2001-08-28_20:00:00;13.1;78947 +2001-08-28_21:00:00;12.9;78947 +2001-08-28_22:00:00;13;78947 +2001-08-28_23:00:00;13.4;78947 +2001-08-29_00:00:00;14.1;78947 +2001-08-29_01:00:00;13.3;78947 +2001-08-29_02:00:00;13.6;78947 +2001-08-29_03:00:00;13.8;78947 +2001-08-29_04:00:00;13.8;78947 +2001-08-29_05:00:00;13.9;78947 +2001-08-29_06:00:00;15.7;70000 +2001-08-29_07:00:00;15.9;70000 +2001-08-29_08:00:00;16.1;70000 +2001-08-29_09:00:00;16.1;70000 +2001-08-29_10:00:00;15.8;70000 +2001-08-29_11:00:00;15.4;70000 +2001-08-29_12:00:00;15.3;70000 +2001-08-29_13:00:00;15.7;70000 +2001-08-29_14:00:00;15.8;70000 +2001-08-29_15:00:00;15;70000 +2001-08-29_16:00:00;14.8;70000 +2001-08-29_17:00:00;14.5;70000 +2001-08-29_18:00:00;13.2;70000 +2001-08-29_19:00:00;13.6;70000 +2001-08-29_20:00:00;13.4;70000 +2001-08-29_21:00:00;13.3;70000 
+2001-08-29_22:00:00;13.5;70000 +2001-08-29_23:00:00;13.2;70000 +2001-08-30_00:00:00;13.5;70000 +2001-08-30_01:00:00;13.3;70000 +2001-08-30_02:00:00;13.2;70000 +2001-08-30_03:00:00;13.6;70000 +2001-08-30_04:00:00;13.5;70000 +2001-08-30_05:00:00;13.5;70000 +2001-08-30_06:00:00;13.5;70000 +2001-08-30_07:00:00;13.6;70000 +2001-08-30_08:00:00;13.5;70000 +2001-08-30_09:00:00;13.5;70000 +2001-08-30_10:00:00;13.1;70000 +2001-08-30_11:00:00;13.2;70000 +2001-08-30_12:00:00;13.2;70000 +2001-08-30_13:00:00;13.4;70000 +2001-08-30_14:00:00;13.2;70000 +2001-08-30_15:00:00;13;70000 +2001-08-30_16:00:00;12.6;70000 +2001-08-30_17:00:00;12.8;70000 +2001-08-30_18:00:00;13.3;70000 +2001-08-30_19:00:00;14;70000 +2001-08-30_20:00:00;14.7;70000 +2001-08-30_21:00:00;14.8;70000 +2001-08-30_22:00:00;14.7;70000 +2001-08-30_23:00:00;14.6;70000 +2001-08-31_00:00:00;14.7;70000 +2001-08-31_01:00:00;14.6;70000 +2001-08-31_02:00:00;14.3;70000 +2001-08-31_03:00:00;14.4;70000 +2001-08-31_04:00:00;14;70000 +2001-08-31_05:00:00;14.2;70000 +2001-08-31_06:00:00;13.6;70000 +2001-08-31_07:00:00;14.5;70000 +2001-08-31_08:00:00;14.6;70000 +2001-08-31_09:00:00;14.3;70000 +2001-08-31_10:00:00;14.2;70000 +2001-08-31_11:00:00;14.5;70000 +2001-08-31_12:00:00;14.3;70000 +2001-08-31_13:00:00;14.2;70000 +2001-08-31_14:00:00;14.3;70000 +2001-08-31_15:00:00;14.3;70000 +2001-08-31_16:00:00;14.2;70000 +2001-08-31_17:00:00;14.3;70000 +2001-08-31_18:00:00;14.2;70000 +2001-08-31_19:00:00;14.1;70000 +2001-08-31_20:00:00;14.2;70000 +2001-08-31_21:00:00;14.3;70000 +2001-08-31_22:00:00;14.2;70000 +2001-08-31_23:00:00;14.3;70000 +2001-09-01_00:00:00;14.6;70000 +2001-09-01_01:00:00;14.8;70000 +2001-09-01_02:00:00;10.5;78947 +2001-09-01_03:00:00;9.7;78947 +2001-09-01_04:00:00;9.2;78947 +2001-09-01_05:00:00;8.9;78947 +2001-09-01_06:00:00;14.9;70000 +2001-09-01_07:00:00;14.8;70000 +2001-09-01_08:00:00;14.8;70000 +2001-09-01_09:00:00;14.6;70000 +2001-09-01_10:00:00;14.7;70000 +2001-09-01_11:00:00;14.9;70000 +2001-09-01_12:00:00;14.8;70000 +2001-09-01_13:00:00;14.4;70000 +2001-09-01_14:00:00;14.1;70000 +2001-09-01_15:00:00;14.1;70000 +2001-09-01_16:00:00;14.3;70000 +2001-09-01_17:00:00;14.3;70000 +2001-09-01_18:00:00;14.2;70000 +2001-09-01_19:00:00;14.1;70000 +2001-09-01_20:00:00;13.9;70000 +2001-09-01_21:00:00;13.9;70000 +2001-09-01_22:00:00;13.8;70000 +2001-09-01_23:00:00;13.6;70000 +2001-09-02_00:00:00;13.6;70000 +2001-09-02_01:00:00;13.3;70000 +2001-09-02_02:00:00;13.2;70000 +2001-09-02_03:00:00;13.2;70000 +2001-09-02_04:00:00;13.1;70000 +2001-09-02_05:00:00;13.1;70000 +2001-09-02_06:00:00;13.2;70000 +2001-09-02_07:00:00;13;70000 +2001-09-02_08:00:00;13.1;70000 +2001-09-02_09:00:00;13.4;70000 +2001-09-02_10:00:00;13.6;70000 +2001-09-02_11:00:00;13.7;70000 +2001-09-02_12:00:00;13.8;70000 +2001-09-02_13:00:00;13.9;70000 +2001-09-02_14:00:00;13.5;70000 +2001-09-02_15:00:00;13.6;70000 +2001-09-02_16:00:00;14;70000 +2001-09-02_17:00:00;13.6;70000 +2001-09-02_18:00:00;13.6;70000 +2001-09-02_19:00:00;13.9;70000 +2001-09-02_20:00:00;14.3;70000 +2001-09-02_21:00:00;14.6;70000 +2001-09-02_22:00:00;14.6;70000 +2001-09-02_23:00:00;14.9;70000 +2001-09-03_00:00:00;14.5;70000 +2001-09-03_01:00:00;14.6;70000 +2001-09-03_02:00:00;14.8;70000 +2001-09-03_03:00:00;14.7;70000 +2001-09-03_04:00:00;14.8;70000 +2001-09-03_05:00:00;14.9;70000 +2001-09-03_06:00:00;15.1;70000 +2001-09-03_07:00:00;15.1;70000 +2001-09-03_08:00:00;15.2;70000 +2001-09-03_09:00:00;15.5;70000 +2001-09-03_10:00:00;15.8;70000 +2001-09-03_11:00:00;15.8;70000 +2001-09-03_12:00:00;15.9;70000 
+2001-09-03_13:00:00;16.2;70000 +2001-09-03_14:00:00;16.2;70000 +2001-09-03_15:00:00;16.2;70000 +2001-09-03_16:00:00;16.1;70000 +2001-09-03_17:00:00;16.1;70000 +2001-09-03_18:00:00;16.2;70000 +2001-09-03_19:00:00;16.3;70000 +2001-09-03_20:00:00;16.4;70000 +2001-09-03_21:00:00;16.4;70000 +2001-09-03_22:00:00;16.1;70000 +2001-09-03_23:00:00;16.2;70000 +2001-09-04_00:00:00;15.9;70000 +2001-09-04_01:00:00;15.6;70000 +2001-09-04_02:00:00;15.3;70000 +2001-09-04_03:00:00;15.3;70000 +2001-09-04_04:00:00;15.6;70000 +2001-09-04_05:00:00;15.6;70000 +2001-09-04_06:00:00;15.4;70000 +2001-09-04_07:00:00;15.4;70000 +2001-09-04_08:00:00;15.3;70000 +2001-09-04_09:00:00;15.4;70000 +2001-09-04_10:00:00;15.5;70000 +2001-09-04_11:00:00;15.8;70000 +2001-09-04_12:00:00;16;70000 +2001-09-04_13:00:00;15.3;70000 +2001-09-04_14:00:00;15.3;70000 +2001-09-04_15:00:00;15.3;70000 +2001-09-04_16:00:00;15.3;70000 +2001-09-04_17:00:00;15.3;70000 +2001-09-04_18:00:00;15.3;70203 +2001-09-04_19:00:00;15.3;70203 +2001-09-04_20:00:00;15;70000 +2001-09-04_21:00:00;15.4;70000 +2001-09-04_22:00:00;15.4;70000 +2001-09-04_23:00:00;15.6;70000 +2001-09-05_00:00:00;15.6;70000 +2001-09-05_01:00:00;15.4;70000 +2001-09-05_02:00:00;15.7;70000 +2001-09-05_03:00:00;15.8;70000 +2001-09-05_04:00:00;15.9;70000 +2001-09-05_05:00:00;15.9;70000 +2001-09-05_06:00:00;16;70000 +2001-09-05_07:00:00;16;70000 +2001-09-05_08:00:00;16.1;70000 +2001-09-05_09:00:00;16.2;70000 +2001-09-05_10:00:00;15.9;70000 +2001-09-05_11:00:00;15.9;70000 +2001-09-05_12:00:00;15.7;70000 +2001-09-05_13:00:00;15.5;70000 +2001-09-05_14:00:00;15;70000 +2001-09-05_15:00:00;14.7;70000 +2001-09-05_16:00:00;14.9;70000 +2001-09-05_17:00:00;14.8;70000 +2001-09-05_18:00:00;14.6;70000 +2001-09-05_19:00:00;14.5;70000 +2001-09-05_20:00:00;14.6;70000 +2001-09-05_21:00:00;14.8;70000 +2001-09-05_22:00:00;14.3;70000 +2001-09-05_23:00:00;14.2;70000 +2001-09-06_00:00:00;14.2;70000 +2001-09-06_01:00:00;14.2;70000 +2001-09-06_02:00:00;14.1;70000 +2001-09-06_03:00:00;14.1;70000 +2001-09-06_04:00:00;14.1;70000 +2001-09-06_05:00:00;14;70000 +2001-09-06_06:00:00;14.1;70000 +2001-09-06_07:00:00;14.3;70000 +2001-09-06_08:00:00;14.6;70000 +2001-09-06_09:00:00;14.9;70000 +2001-09-06_10:00:00;13.8;70000 +2001-09-06_11:00:00;14.3;70000 +2001-09-06_12:00:00;14.8;70000 +2001-09-06_13:00:00;14.9;70000 +2001-09-06_14:00:00;14.9;70000 +2001-09-06_15:00:00;14.9;70000 +2001-09-06_16:00:00;14.9;70000 +2001-09-06_17:00:00;14.8;70000 +2001-09-06_18:00:00;14.7;70000 +2001-09-06_19:00:00;14.6;70000 +2001-09-06_20:00:00;14.6;70000 +2001-09-06_21:00:00;14.5;70000 +2001-09-06_22:00:00;14.7;70000 +2001-09-06_23:00:00;14.7;70000 +2001-09-07_00:00:00;14.6;70000 +2001-09-07_01:00:00;14.6;70000 +2001-09-07_02:00:00;14.5;70000 +2001-09-07_03:00:00;14.3;70000 +2001-09-07_04:00:00;14.6;70000 +2001-09-07_05:00:00;14;70000 +2001-09-07_06:00:00;13.6;70000 +2001-09-07_07:00:00;13.7;70000 +2001-09-07_08:00:00;12.9;70000 +2001-09-07_09:00:00;13.6;70000 +2001-09-07_10:00:00;13.4;70000 +2001-09-07_11:00:00;13.2;70000 +2001-09-07_12:00:00;12.3;70000 +2001-09-07_13:00:00;13.6;70000 +2001-09-07_14:00:00;13.6;70000 +2001-09-07_15:00:00;13.3;70000 +2001-09-07_16:00:00;13.5;70000 +2001-09-07_17:00:00;13.6;70000 +2001-09-07_18:00:00;13.5;70000 +2001-09-07_19:00:00;13.4;70000 +2001-09-07_20:00:00;13.5;70000 +2001-09-07_21:00:00;13.1;70000 +2001-09-07_22:00:00;12.5;70000 +2001-09-07_23:00:00;12.5;70000 +2001-09-08_00:00:00;12.5;70000 +2001-09-08_01:00:00;12.8;70000 +2001-09-08_02:00:00;11.8;70000 +2001-09-08_03:00:00;12.5;70000 
+2001-09-08_04:00:00;13.1;70000 +2001-09-08_05:00:00;13.2;70000 +2001-09-08_06:00:00;12.6;70000 +2001-09-08_07:00:00;12.9;70000 +2001-09-08_08:00:00;13.5;70000 +2001-09-08_09:00:00;13.5;70000 +2001-09-08_10:00:00;13.4;70000 +2001-09-08_11:00:00;13.6;70000 +2001-09-08_12:00:00;12.5;70000 +2001-09-08_13:00:00;13.2;70000 +2001-09-08_14:00:00;13.5;70000 +2001-09-08_15:00:00;13.6;70000 +2001-09-08_16:00:00;13.6;70000 +2001-09-08_17:00:00;13.6;70000 +2001-09-08_18:00:00;13.6;70000 +2001-09-08_19:00:00;13.6;70000 +2001-09-08_20:00:00;13.6;70203 +2001-09-08_21:00:00;13.7;70000 +2001-09-08_22:00:00;13.6;70000 +2001-09-08_23:00:00;13.6;70000 +2001-09-09_00:00:00;13.8;70000 +2001-09-09_01:00:00;13.6;70000 +2001-09-09_02:00:00;13.5;70000 +2001-09-09_03:00:00;13.6;70000 +2001-09-09_04:00:00;13.5;70000 +2001-09-09_05:00:00;12.3;70000 +2001-09-09_06:00:00;13.3;70000 +2001-09-09_07:00:00;13.2;70000 +2001-09-09_08:00:00;13.2;70000 +2001-09-09_09:00:00;13.3;70000 +2001-09-09_10:00:00;13.4;58927 +2001-09-09_11:00:00;13.5;70000 +2001-09-09_12:00:00;13.6;70000 +2001-09-09_13:00:00;13.6;70000 +2001-09-09_14:00:00;13.6;70000 +2001-09-09_15:00:00;13.6;70000 +2001-09-09_16:00:00;13.7;70000 +2001-09-09_17:00:00;13.8;70000 +2001-09-09_18:00:00;12.1;70000 +2001-09-09_19:00:00;13.3;70000 +2001-09-09_20:00:00;13.1;70000 +2001-09-09_21:00:00;12.9;70000 +2001-09-09_22:00:00;11.8;70000 +2001-09-09_23:00:00;11.6;70000 +2001-09-10_00:00:00;11.8;70000 +2001-09-10_01:00:00;11.5;70000 +2001-09-10_02:00:00;11.6;70000 +2001-09-10_03:00:00;11.9;70000 +2001-09-10_04:00:00;12.4;70000 +2001-09-10_05:00:00;12.5;70000 +2001-09-10_06:00:00;12.9;70000 +2001-09-10_07:00:00;12.8;70000 +2001-09-10_08:00:00;13;70000 +2001-09-10_09:00:00;14.6;70000 +2001-09-10_10:00:00;14.8;70000 +2001-09-10_11:00:00;15;70000 +2001-09-10_12:00:00;15.1;70000 +2001-09-10_13:00:00;15.3;70000 +2001-09-10_14:00:00;15.3;70000 +2001-09-10_15:00:00;15.2;70000 +2001-09-10_16:00:00;15.6;70000 +2001-09-10_17:00:00;15.4;70000 +2001-09-10_18:00:00;15.4;70000 +2001-09-10_19:00:00;15;70000 +2001-09-10_20:00:00;15.6;70000 +2001-09-10_21:00:00;16.4;70000 +2001-09-10_22:00:00;16.6;70000 +2001-09-10_23:00:00;16.4;70000 +2001-09-11_00:00:00;16.3;70000 +2001-09-11_01:00:00;16.1;70000 +2001-09-11_02:00:00;15.9;70000 +2001-09-11_03:00:00;15.9;70000 +2001-09-11_04:00:00;15.9;70000 +2001-09-11_05:00:00;15.6;70000 +2001-09-11_06:00:00;15.6;70000 +2001-09-11_07:00:00;15.4;70000 +2001-09-11_08:00:00;15.7;70000 +2001-09-11_09:00:00;15.7;70000 +2001-09-11_10:00:00;15.6;70000 +2001-09-11_11:00:00;15.3;70000 +2001-09-11_12:00:00;15.2;70000 +2001-09-11_13:00:00;15.2;70000 +2001-09-11_14:00:00;15.2;70000 +2001-09-11_15:00:00;15.3;70000 +2001-09-11_16:00:00;15.4;70000 +2001-09-11_17:00:00;15.5;70000 +2001-09-11_18:00:00;15.4;70000 +2001-09-11_19:00:00;15.1;70000 +2001-09-11_20:00:00;15;70000 +2001-09-11_21:00:00;14.9;70000 +2001-09-11_22:00:00;14.9;70000 +2001-09-11_23:00:00;15;70000 +2001-09-12_00:00:00;15.1;70000 +2001-09-12_01:00:00;14.9;70000 +2001-09-12_02:00:00;14.3;70000 +2001-09-12_03:00:00;13.6;70000 +2001-09-12_04:00:00;14.2;70000 +2001-09-12_05:00:00;14.3;70000 +2001-09-12_06:00:00;15;70000 +2001-09-12_07:00:00;14.9;70000 +2001-09-12_08:00:00;15.1;70000 +2001-09-12_09:00:00;14.9;70000 +2001-09-12_10:00:00;14.8;70000 +2001-09-12_11:00:00;14.8;70000 +2001-09-12_12:00:00;13.6;70000 +2001-09-12_13:00:00;13.3;70000 +2001-09-12_14:00:00;13.6;70000 +2001-09-12_15:00:00;14.1;70000 +2001-09-12_16:00:00;15;70000 +2001-09-12_17:00:00;15.2;70000 +2001-09-12_18:00:00;14.2;70000 
+2001-09-12_19:00:00;14.2;70000 +2001-09-12_20:00:00;14.1;70000 +2001-09-12_21:00:00;14.2;70000 +2001-09-12_22:00:00;14.2;70000 +2001-09-12_23:00:00;14.2;70000 +2001-09-13_00:00:00;14.3;70000 +2001-09-13_01:00:00;14.4;70000 +2001-09-13_02:00:00;14.2;70000 +2001-09-13_03:00:00;14.3;70000 +2001-09-13_04:00:00;14.2;70000 +2001-09-13_05:00:00;13.9;70000 +2001-09-13_06:00:00;13.9;70000 +2001-09-13_07:00:00;13.9;70000 +2001-09-13_08:00:00;14.2;70000 +2001-09-13_09:00:00;14.2;70000 +2001-09-13_10:00:00;14.1;70000 +2001-09-13_11:00:00;13.8;70000 +2001-09-13_12:00:00;13.5;70000 +2001-09-13_13:00:00;14.2;70000 +2001-09-13_14:00:00;14;70000 +2001-09-13_15:00:00;14.1;70000 +2001-09-13_16:00:00;13.6;70000 +2001-09-13_17:00:00;13.9;70000 +2001-09-13_18:00:00;13.5;70000 +2001-09-13_19:00:00;13.9;70000 +2001-09-13_20:00:00;14.1;70000 +2001-09-13_21:00:00;14.1;70000 +2001-09-13_22:00:00;13.9;70000 +2001-09-13_23:00:00;13.9;70000 +2001-09-14_00:00:00;13.9;70000 +2001-09-14_01:00:00;14;70000 +2001-09-14_02:00:00;14;70000 +2001-09-14_03:00:00;13.9;70000 +2001-09-14_04:00:00;14.1;70000 +2001-09-14_05:00:00;14.1;70000 +2001-09-14_06:00:00;14.1;70000 +2001-09-14_07:00:00;13.9;70000 +2001-09-14_08:00:00;13.8;70000 +2001-09-14_09:00:00;13.3;70000 +2001-09-14_10:00:00;13.2;70000 +2001-09-14_11:00:00;13.2;70000 +2001-09-14_12:00:00;12.9;70000 +2001-09-14_13:00:00;12.9;70000 +2001-09-14_14:00:00;12.9;70000 +2001-09-14_15:00:00;12.9;70000 +2001-09-14_16:00:00;12.7;70000 +2001-09-14_17:00:00;12.8;70000 +2001-09-14_18:00:00;12.5;70000 +2001-09-14_19:00:00;12.6;70000 +2001-09-14_20:00:00;12.6;70000 +2001-09-14_21:00:00;12;70000 +2001-09-14_22:00:00;11.8;70000 +2001-09-14_23:00:00;12.9;70000 +2001-09-15_00:00:00;13.3;70000 +2001-09-15_01:00:00;12.8;70000 +2001-09-15_02:00:00;11.9;70000 +2001-09-15_03:00:00;12.6;70000 +2001-09-15_04:00:00;13.2;70000 +2001-09-15_05:00:00;13.2;70000 +2001-09-15_06:00:00;12.9;70000 +2001-09-15_07:00:00;12.8;70000 +2001-09-15_08:00:00;12.5;70000 +2001-09-15_09:00:00;11.4;70000 +2001-09-15_10:00:00;12.7;70000 +2001-09-15_11:00:00;11.1;70000 +2001-09-15_12:00:00;12.2;70000 +2001-09-15_13:00:00;12.1;70000 +2001-09-15_14:00:00;12.1;70000 +2001-09-15_15:00:00;12.2;70000 +2001-09-15_16:00:00;12.2;70000 +2001-09-15_17:00:00;12.2;70000 +2001-09-15_18:00:00;12.2;70000 +2001-09-15_19:00:00;12.2;70000 +2001-09-15_20:00:00;12.1;70000 +2001-09-15_21:00:00;12.1;70000 +2001-09-15_22:00:00;12.1;70000 +2001-09-15_23:00:00;12.2;70000 +2001-09-16_00:00:00;11.2;70000 +2001-09-16_01:00:00;11.6;70000 +2001-09-16_02:00:00;11.1;70000 +2001-09-16_03:00:00;11.5;70000 +2001-09-16_04:00:00;11.3;70000 +2001-09-16_05:00:00;11.5;70000 +2001-09-16_06:00:00;11.2;70000 +2001-09-16_07:00:00;11.5;70000 +2001-09-16_08:00:00;11.2;70000 +2001-09-16_09:00:00;11.4;70000 +2001-09-16_10:00:00;11.3;70000 +2001-09-16_11:00:00;11.5;70000 +2001-09-16_12:00:00;11.7;70000 +2001-09-16_13:00:00;11.8;70000 +2001-09-16_14:00:00;11.8;70000 +2001-09-16_15:00:00;11.9;70000 +2001-09-16_16:00:00;12.1;70000 +2001-09-16_17:00:00;12.1;70000 +2001-09-16_18:00:00;11.5;70000 +2001-09-16_19:00:00;11.5;70000 +2001-09-16_20:00:00;11.7;70000 +2001-09-16_21:00:00;11.8;70000 +2001-09-16_22:00:00;11.7;70000 +2001-09-16_23:00:00;11.9;70000 +2001-09-17_00:00:00;12;70000 +2001-09-17_01:00:00;11.6;70000 +2001-09-17_02:00:00;11.8;70000 +2001-09-17_03:00:00;11.9;70000 +2001-09-17_04:00:00;12;70000 +2001-09-17_05:00:00;12;70000 +2001-09-17_06:00:00;12.3;70000 +2001-09-17_07:00:00;12.5;70000 +2001-09-17_08:00:00;12.4;70000 +2001-09-17_09:00:00;12.6;70000 
+2001-09-17_10:00:00;13.2;70000 +2001-09-17_11:00:00;13.1;70000 +2001-09-17_12:00:00;13.3;70000 +2001-09-17_13:00:00;12.8;70000 +2001-09-17_14:00:00;12.6;70000 +2001-09-17_15:00:00;12.9;70000 +2001-09-17_16:00:00;12.9;70000 +2001-09-17_17:00:00;14;70000 +2001-09-17_18:00:00;14.1;70000 +2001-09-17_19:00:00;14.1;70000 +2001-09-17_20:00:00;14.2;70000 +2001-09-17_21:00:00;14.2;70000 +2001-09-17_22:00:00;14.5;70000 +2001-09-17_23:00:00;14.2;70000 +2001-09-18_00:00:00;14.3;70000 +2001-09-18_01:00:00;14.2;70000 +2001-09-18_02:00:00;14.5;70000 +2001-09-18_03:00:00;14.3;70000 +2001-09-18_04:00:00;12.8;70000 +2001-09-18_05:00:00;12.2;70000 +2001-09-18_06:00:00;12.7;70000 +2001-09-18_07:00:00;12.8;70000 +2001-09-18_08:00:00;12.8;70000 +2001-09-18_09:00:00;13.2;70000 +2001-09-18_10:00:00;13.7;70000 +2001-09-18_11:00:00;14.3;70000 +2001-09-18_12:00:00;14.3;70000 +2001-09-18_13:00:00;14.5;70000 +2001-09-18_14:00:00;14.4;70000 +2001-09-18_15:00:00;14.5;70000 +2001-09-18_16:00:00;14.5;70000 +2001-09-18_17:00:00;14.6;70000 +2001-09-18_18:00:00;14.5;70000 +2001-09-18_19:00:00;14.2;70000 +2001-09-18_20:00:00;13.3;70000 +2001-09-18_21:00:00;13.1;70000 +2001-09-18_22:00:00;12.8;70000 +2001-09-18_23:00:00;12.7;70000 +2001-09-19_00:00:00;12.6;70000 +2001-09-19_01:00:00;12.4;70000 +2001-09-19_02:00:00;12.1;70000 +2001-09-19_03:00:00;11.9;70000 +2001-09-19_04:00:00;12.1;70000 +2001-09-19_05:00:00;11;78947 +2001-09-19_06:00:00;11.6;78947 +2001-09-19_07:00:00;11.7;78947 +2001-09-19_09:00:00;13.1;78947 +2001-09-19_10:00:00;13.5;78947 +2001-09-19_11:00:00;13.7;78947 +2001-09-19_12:00:00;12.2;78947 +2001-09-19_13:00:00;12.5;78947 +2001-09-19_14:00:00;12.2;78947 +2001-09-19_15:00:00;11.9;78947 +2001-09-19_16:00:00;11.8;78947 +2001-09-19_17:00:00;11.8;78947 +2001-09-19_18:00:00;11.6;78947 +2001-09-19_19:00:00;11.7;78947 +2001-09-19_20:00:00;11.8;78947 +2001-09-19_21:00:00;11.7;78947 +2001-09-19_22:00:00;11.7;78947 +2001-09-19_23:00:00;11.5;78947 +2001-09-20_00:00:00;11.4;78947 +2001-09-20_01:00:00;11.7;78947 +2001-09-20_02:00:00;12.2;78947 +2001-09-20_03:00:00;12.7;78947 +2001-09-20_04:00:00;12.7;78947 +2001-09-20_06:00:00;12.4;78947 +2001-09-20_07:00:00;12.5;78947 +2001-09-20_08:00:00;12.9;78947 +2001-09-20_09:00:00;13.2;78947 +2001-09-20_10:00:00;13.2;78947 +2001-09-20_11:00:00;13.4;78947 +2001-09-20_12:00:00;13.9;78947 +2001-09-20_13:00:00;14.4;78947 +2001-09-20_14:00:00;14.4;78947 +2001-09-20_15:00:00;14;78947 +2001-09-20_16:00:00;13.3;78947 +2001-09-20_17:00:00;12.7;78947 +2001-09-20_18:00:00;11.7;78947 +2001-09-20_19:00:00;11.6;78947 +2001-09-20_20:00:00;11.3;78947 +2001-09-20_21:00:00;11.3;78947 +2001-09-20_22:00:00;11.2;78947 +2001-09-20_23:00:00;11.3;78947 +2001-09-21_00:00:00;10.7;78947 +2001-09-21_01:00:00;10.8;78947 +2001-09-21_02:00:00;10.8;78947 +2001-09-21_03:00:00;11.3;78947 +2001-09-21_04:00:00;11.4;78947 +2001-09-21_05:00:00;11.3;78947 +2001-09-21_06:00:00;11.5;78947 +2001-09-21_07:00:00;11.9;78947 +2001-09-21_09:00:00;13.2;78947 +2001-09-21_10:00:00;13.3;78947 +2001-09-21_11:00:00;12.9;78947 +2001-09-21_12:00:00;13.3;78947 +2001-09-21_13:00:00;13.6;78947 +2001-09-21_14:00:00;12.7;78947 +2001-09-21_15:00:00;11.7;78947 +2001-09-21_16:00:00;12;78947 +2001-09-21_17:00:00;11.7;78947 +2001-09-21_18:00:00;11.1;78947 +2001-09-21_19:00:00;11.3;78947 +2001-09-21_20:00:00;11.4;78947 +2001-09-21_21:00:00;11.4;78947 +2001-09-21_22:00:00;10.8;78947 +2001-09-21_23:00:00;10.9;78947 +2001-09-22_00:00:00;10.9;78947 +2001-09-22_01:00:00;11.2;78947 +2001-09-22_02:00:00;11.2;78947 +2001-09-22_03:00:00;11.1;78947 
+2001-09-22_04:00:00;11.2;78947 +2001-09-22_05:00:00;11.1;78947 +2001-09-22_06:00:00;10.6;78947 +2001-09-22_07:00:00;10.7;78947 +2001-09-22_09:00:00;11.3;78947 +2001-09-22_10:00:00;12.3;78947 +2001-09-22_11:00:00;12.9;78947 +2001-09-22_12:00:00;11.7;78947 +2001-09-22_13:00:00;11.8;78947 +2001-09-22_14:00:00;11.6;78947 +2001-09-22_15:00:00;11.7;78947 +2001-09-22_16:00:00;11.6;78947 +2001-09-22_17:00:00;11.6;78947 +2001-09-22_18:00:00;11.2;78947 +2001-09-22_19:00:00;11.1;78947 +2001-09-22_20:00:00;11;78947 +2001-09-22_21:00:00;10.8;78947 +2001-09-22_22:00:00;10.7;78947 +2001-09-22_23:00:00;10.8;78947 +2001-09-23_00:00:00;10.7;78947 +2001-09-23_01:00:00;10.8;78947 +2001-09-23_02:00:00;10.9;78947 +2001-09-23_03:00:00;10.8;78947 +2001-09-23_04:00:00;10.7;78947 +2001-09-23_05:00:00;10.9;78947 +2001-09-23_06:00:00;10.8;78947 +2001-09-23_07:00:00;11;78947 +2001-09-23_08:00:00;11.1;78947 +2001-09-23_09:00:00;11.7;78947 +2001-09-23_10:00:00;12.2;78947 +2001-09-23_11:00:00;12.6;78947 +2001-09-23_12:00:00;11.6;78947 +2001-09-23_13:00:00;12.6;78947 +2001-09-23_14:00:00;12.8;78947 +2001-09-23_15:00:00;12.7;78947 +2001-09-23_16:00:00;12.1;78947 +2001-09-23_17:00:00;11.6;78947 +2001-09-23_18:00:00;10.5;78947 +2001-09-23_19:00:00;10.6;78947 +2001-09-23_20:00:00;11.2;78947 +2001-09-23_21:00:00;11.6;78947 +2001-09-23_22:00:00;11.8;78947 +2001-09-23_23:00:00;12;78947 +2001-09-24_00:00:00;11.8;78947 +2001-09-24_01:00:00;12.1;78947 +2001-09-24_02:00:00;12.2;78947 +2001-09-24_03:00:00;12.4;78947 +2001-09-24_04:00:00;12.5;78947 +2001-09-24_05:00:00;12.7;78947 +2001-09-24_06:00:00;12.5;78947 +2001-09-24_07:00:00;12.8;78947 +2001-09-24_08:00:00;13.4;78947 +2001-09-24_09:00:00;13.6;78947 +2001-09-24_10:00:00;13.9;78947 +2001-09-24_11:00:00;15;78947 +2001-09-24_12:00:00;14.7;78947 +2001-09-24_13:00:00;14.6;78947 +2001-09-24_14:00:00;14.2;78947 +2001-09-24_15:00:00;13.8;78947 +2001-09-24_16:00:00;13.7;78947 +2001-09-24_17:00:00;13.5;78947 +2001-09-24_18:00:00;13.3;78947 +2001-09-24_19:00:00;13.1;78947 +2001-09-24_20:00:00;13;78947 +2001-09-24_21:00:00;12.9;78947 +2001-09-24_22:00:00;12.9;78947 +2001-09-24_23:00:00;12.8;78947 +2001-09-25_00:00:00;12.4;78947 +2001-09-25_01:00:00;12.4;78947 +2001-09-25_02:00:00;12.4;78947 +2001-09-25_03:00:00;12.4;78947 +2001-09-25_04:00:00;12.4;78947 +2001-09-25_05:00:00;12.5;78947 +2001-09-25_06:00:00;13.1;78947 +2001-09-25_07:00:00;13.3;78947 +2001-09-25_08:00:00;13.7;78947 +2001-09-25_09:00:00;13.8;78947 +2001-09-25_10:00:00;13.9;78947 +2001-09-25_12:00:00;15.2;78947 +2001-09-25_13:00:00;15.5;78947 +2001-09-25_14:00:00;15.8;78947 +2001-09-25_15:00:00;15.2;78947 +2001-09-25_16:00:00;14.5;78947 +2001-09-25_17:00:00;14;78947 +2001-09-25_18:00:00;14.9;78947 +2001-09-25_19:00:00;14.7;78947 +2001-09-25_20:00:00;14.3;78947 +2001-09-25_21:00:00;13.3;78947 +2001-09-25_22:00:00;13.3;78947 +2001-09-25_23:00:00;13.5;78947 +2001-09-26_00:00:00;13.1;78947 +2001-09-26_01:00:00;13.2;78947 +2001-09-26_02:00:00;13.3;78947 +2001-09-26_03:00:00;13.3;78947 +2001-09-26_04:00:00;13.4;78947 +2001-09-26_05:00:00;13.5;78947 +2001-09-26_06:00:00;13.7;78947 +2001-09-26_07:00:00;13.4;78947 +2001-09-26_08:00:00;13.2;78947 +2001-09-26_09:00:00;12.9;78947 +2001-09-26_10:00:00;12.5;78947 +2001-09-26_11:00:00;13.4;78947 +2001-09-26_12:00:00;13.3;78947 +2001-09-26_13:00:00;13.7;78947 +2001-09-26_14:00:00;13.5;78947 +2001-09-26_15:00:00;13;78947 +2001-09-26_16:00:00;12.5;78947 +2001-09-26_17:00:00;12.4;78947 +2001-09-26_18:00:00;12;78947 +2001-09-26_19:00:00;11.7;78947 +2001-09-26_20:00:00;11.5;78947 
+2001-09-26_21:00:00;11.4;78947 +2001-09-26_22:00:00;11.3;78947 +2001-09-26_23:00:00;11.1;78947 +2001-09-27_00:00:00;10.8;78947 +2001-09-27_01:00:00;10.5;78947 +2001-09-27_02:00:00;10.1;78947 +2001-09-27_03:00:00;9.8;78947 +2001-09-27_04:00:00;9.7;78947 +2001-09-27_05:00:00;10.1;78947 +2001-09-27_06:00:00;9.5;78947 +2001-09-27_07:00:00;10.9;78947 +2001-09-27_09:00:00;12.5;78947 +2001-09-27_10:00:00;13.2;78947 +2001-09-27_11:00:00;13.9;78947 +2001-09-27_12:00:00;13.8;78947 +2001-09-27_13:00:00;13.7;78947 +2001-09-27_14:00:00;13.5;78947 +2001-09-27_15:00:00;12.9;78947 +2001-09-27_16:00:00;12.6;78947 +2001-09-27_17:00:00;12.4;78947 +2001-09-27_18:00:00;12.2;78947 +2001-09-27_19:00:00;12.3;78947 +2001-09-27_20:00:00;12.5;78947 +2001-09-27_21:00:00;12.7;78947 +2001-09-27_22:00:00;12.8;78947 +2001-09-27_23:00:00;12.9;78947 +2001-09-28_00:00:00;13;78947 +2001-09-28_01:00:00;13.1;78947 +2001-09-28_02:00:00;13.2;78947 +2001-09-28_03:00:00;13.3;78947 +2001-09-28_04:00:00;13.4;78947 +2001-09-28_05:00:00;13.4;78947 +2001-09-28_06:00:00;13.3;78947 +2001-09-28_07:00:00;13.3;78947 +2001-09-28_08:00:00;13.5;78947 +2001-09-28_09:00:00;14.1;78947 +2001-09-28_10:00:00;14.5;78947 +2001-09-28_11:00:00;14.2;78947 +2001-09-28_12:00:00;15.2;78947 +2001-09-28_13:00:00;14.6;78947 +2001-09-28_14:00:00;14.2;78947 +2001-09-28_15:00:00;14;78947 +2001-09-28_16:00:00;14.1;78947 +2001-09-28_17:00:00;14.1;78947 +2001-09-28_18:00:00;14.1;78947 +2001-09-28_19:00:00;14.1;78947 +2001-09-28_20:00:00;14;78947 +2001-09-28_21:00:00;13.9;78947 +2001-09-28_22:00:00;13.9;78947 +2001-09-28_23:00:00;13.8;78947 +2001-09-29_00:00:00;14.1;78947 +2001-09-29_01:00:00;14.1;78947 +2001-09-29_02:00:00;14;78947 +2001-09-29_03:00:00;13.7;78947 +2001-09-29_04:00:00;13.3;78947 +2001-09-29_05:00:00;13;78947 +2001-09-29_06:00:00;12.9;78947 +2001-09-29_07:00:00;13.1;78947 +2001-09-29_09:00:00;14.1;78947 +2001-09-29_10:00:00;14.7;78947 +2001-09-29_11:00:00;15.3;78947 +2001-09-29_12:00:00;14.1;78947 +2001-09-29_13:00:00;14.2;78947 +2001-09-29_14:00:00;14.6;78947 +2001-09-29_15:00:00;14.7;78947 +2001-09-29_16:00:00;14.5;78947 +2001-09-29_17:00:00;14.3;78947 +2001-09-29_18:00:00;14.1;78947 +2001-09-29_19:00:00;14.1;78947 +2001-09-29_20:00:00;14.1;78947 +2001-09-29_21:00:00;14.1;78947 +2001-09-29_22:00:00;14;78947 +2001-09-29_23:00:00;13.5;78947 +2001-09-30_00:00:00;13.2;78947 +2001-09-30_01:00:00;13;78947 +2001-09-30_02:00:00;12.9;78947 +2001-09-30_03:00:00;12.8;78947 +2001-09-30_04:00:00;12.7;78947 +2001-09-30_05:00:00;12.7;78947 +2001-09-30_06:00:00;12.9;78947 +2001-09-30_07:00:00;13;78947 +2001-09-30_09:00:00;13.9;78947 +2001-09-30_10:00:00;14.7;78947 +2001-09-30_11:00:00;15.4;78947 +2001-09-30_12:00:00;16.7;78947 +2001-09-30_13:00:00;16.7;78947 +2001-09-30_14:00:00;16.6;78947 +2001-09-30_15:00:00;16.4;78947 +2001-09-30_16:00:00;16.1;78947 +2001-09-30_17:00:00;15.3;78947 +2001-09-30_18:00:00;14.9;78947 +2001-09-30_19:00:00;14.8;78947 +2001-09-30_20:00:00;14.9;78947 +2001-09-30_21:00:00;14.8;78947 +2001-09-30_22:00:00;14.7;78947 +2001-09-30_23:00:00;14.7;78947 +2001-10-01_00:00:00;14.1;78947 +2001-10-01_01:00:00;14.2;78947 +2001-10-01_02:00:00;14.2;78947 +2001-10-01_03:00:00;14.1;78947 +2001-10-01_04:00:00;13.8;78947 +2001-10-01_05:00:00;13.6;78947 +2001-10-01_06:00:00;13.4;78947 +2001-10-01_07:00:00;13.8;78947 +2001-10-01_08:00:00;14.6;78947 +2001-10-01_09:00:00;15.5;78947 +2001-10-01_10:00:00;16.2;78947 +2001-10-01_11:00:00;16.7;78947 +2001-10-01_12:00:00;17.7;78947 +2001-10-01_13:00:00;18;78947 +2001-10-01_14:00:00;17.8;78947 
+2001-10-01_15:00:00;17.2;78947 +2001-10-01_16:00:00;16.2;78947 +2001-10-01_17:00:00;15.3;78947 +2001-10-01_18:00:00;14.9;78947 +2001-10-01_19:00:00;14.8;78947 +2001-10-01_20:00:00;14.5;78947 +2001-10-01_21:00:00;14.2;78947 +2001-10-01_22:00:00;14.1;78947 +2001-10-01_23:00:00;14;78947 +2001-10-02_00:00:00;14.1;78947 +2001-10-02_01:00:00;13.9;78947 +2001-10-02_02:00:00;13.6;78947 +2001-10-02_03:00:00;13.6;78947 +2001-10-02_04:00:00;13.5;78947 +2001-10-02_05:00:00;13.5;78947 +2001-10-02_06:00:00;13.8;78947 +2001-10-02_07:00:00;13.9;78947 +2001-10-02_08:00:00;14;78947 +2001-10-02_09:00:00;14.4;78947 +2001-10-02_10:00:00;14.4;78947 +2001-10-02_11:00:00;14.1;78947 +2001-10-02_12:00:00;15;78947 +2001-10-02_13:00:00;14.7;78947 +2001-10-02_14:00:00;13.9;78947 +2001-10-02_15:00:00;13.4;78947 +2001-10-02_16:00:00;13;78947 +2001-10-02_17:00:00;12.7;78947 +2001-10-02_18:00:00;12.3;78947 +2001-10-02_19:00:00;12;78947 +2001-10-02_20:00:00;11.3;78947 +2001-10-02_21:00:00;10.6;78947 +2001-10-02_22:00:00;11.4;78947 +2001-10-02_23:00:00;11.1;78947 +2001-10-03_00:00:00;10.9;78947 +2001-10-03_01:00:00;11.3;78947 +2001-10-03_02:00:00;11.6;78947 +2001-10-03_03:00:00;11.8;78947 +2001-10-03_04:00:00;11.3;78947 +2001-10-03_05:00:00;11.7;78947 +2001-10-03_06:00:00;12.2;78947 +2001-10-03_07:00:00;12.8;78947 +2001-10-03_08:00:00;13.3;78947 +2001-10-03_09:00:00;13.1;78947 +2001-10-03_10:00:00;13.4;78947 +2001-10-03_11:00:00;13.7;78947 +2001-10-03_12:00:00;13.8;78947 +2001-10-03_13:00:00;13.9;78947 +2001-10-03_14:00:00;13.6;78947 +2001-10-03_15:00:00;13.9;78947 +2001-10-03_16:00:00;14.2;78947 +2001-10-03_17:00:00;14.3;78947 +2001-10-03_18:00:00;13.9;78947 +2001-10-03_19:00:00;14.1;78947 +2001-10-03_20:00:00;14.4;78947 +2001-10-03_21:00:00;12.9;78947 +2001-10-03_22:00:00;12.8;78947 +2001-10-03_23:00:00;13.1;78947 +2001-10-04_00:00:00;12.1;78947 +2001-10-04_01:00:00;12.3;78947 +2001-10-04_02:00:00;11.9;78947 +2001-10-04_03:00:00;11.5;78947 +2001-10-04_04:00:00;11.7;78947 +2001-10-04_05:00:00;12.3;78947 +2001-10-04_06:00:00;11;78947 +2001-10-04_07:00:00;11.1;78947 +2001-10-04_09:00:00;11.5;78947 +2001-10-04_10:00:00;11.6;78947 +2001-10-04_11:00:00;12;78947 +2001-10-04_12:00:00;11.6;78947 +2001-10-04_13:00:00;12.2;78947 +2001-10-04_14:00:00;13;78947 +2001-10-04_15:00:00;12;78947 +2001-10-04_16:00:00;12.4;78947 +2001-10-04_17:00:00;11.5;78947 +2001-10-04_18:00:00;11.1;78947 +2001-10-04_19:00:00;11.2;78947 +2001-10-04_20:00:00;11.8;78947 +2001-10-04_21:00:00;11.5;78947 +2001-10-04_22:00:00;11.4;78947 +2001-10-04_23:00:00;11.2;78947 +2001-10-05_00:00:00;10.5;78947 +2001-10-05_01:00:00;10.8;78947 +2001-10-05_02:00:00;10.7;78947 +2001-10-05_03:00:00;10.9;78947 +2001-10-05_04:00:00;11.1;78947 +2001-10-05_05:00:00;10.9;78947 +2001-10-05_06:00:00;10.8;78947 +2001-10-05_07:00:00;10.9;78947 +2001-10-05_09:00:00;12.2;78947 +2001-10-05_10:00:00;12.8;78947 +2001-10-05_11:00:00;12.6;78947 +2001-10-05_12:00:00;11.9;78947 +2001-10-05_13:00:00;12.3;78947 +2001-10-05_14:00:00;13;78947 +2001-10-05_15:00:00;13.4;78947 +2001-10-05_16:00:00;13.5;78947 +2001-10-05_17:00:00;13.6;78947 +2001-10-05_18:00:00;13.6;78947 +2001-10-05_19:00:00;13.7;78947 +2001-10-05_20:00:00;13.7;78947 +2001-10-05_21:00:00;13.6;78947 +2001-10-05_22:00:00;13.4;78947 +2001-10-05_23:00:00;13.3;78947 +2001-10-06_00:00:00;12.4;78947 +2001-10-06_01:00:00;12;78947 +2001-10-06_02:00:00;11.9;78947 +2001-10-06_03:00:00;11.9;78947 +2001-10-06_04:00:00;11.2;78947 +2001-10-06_05:00:00;11;78947 +2001-10-06_06:00:00;9.5;78947 +2001-10-06_07:00:00;9.7;78947 
+2001-10-06_08:00:00;10.2;78947 +2001-10-06_09:00:00;10.9;78947 +2001-10-06_10:00:00;11.1;78947 +2001-10-06_11:00:00;11.3;78947 +2001-10-06_12:00:00;10.5;78947 +2001-10-06_13:00:00;9.8;78947 +2001-10-06_14:00:00;10;78947 +2001-10-06_15:00:00;10.1;78947 +2001-10-06_16:00:00;10.1;78947 +2001-10-06_17:00:00;9.9;78947 +2001-10-06_18:00:00;9.1;78947 +2001-10-06_19:00:00;8.8;78947 +2001-10-06_20:00:00;8.5;78947 +2001-10-06_21:00:00;9;78947 +2001-10-06_22:00:00;9;78947 +2001-10-06_23:00:00;9.1;78947 +2001-10-07_00:00:00;9.2;78947 +2001-10-07_01:00:00;9.6;78947 +2001-10-07_02:00:00;9.5;78947 +2001-10-07_03:00:00;9.9;78947 +2001-10-07_04:00:00;10.6;78947 +2001-10-07_05:00:00;10.1;78947 +2001-10-07_06:00:00;9.5;78947 +2001-10-07_07:00:00;9.4;78947 +2001-10-07_09:00:00;10.7;78947 +2001-10-07_10:00:00;10.7;78947 +2001-10-07_11:00:00;9.5;78947 +2001-10-07_12:00:00;9.9;78947 +2001-10-07_13:00:00;9.8;78947 +2001-10-07_14:00:00;10.5;78947 +2001-10-07_15:00:00;9.1;78947 +2001-10-07_16:00:00;9.5;78947 +2001-10-07_17:00:00;9.5;78947 +2001-10-07_18:00:00;8.1;78947 +2001-10-07_19:00:00;9.1;78947 +2001-10-07_20:00:00;9.3;78947 +2001-10-07_21:00:00;9.3;78947 +2001-10-07_22:00:00;9.2;78947 +2001-10-07_23:00:00;9.2;78947 +2001-10-08_00:00:00;7.8;78947 +2001-10-08_01:00:00;7.7;78947 +2001-10-08_02:00:00;7.9;78947 +2001-10-08_03:00:00;7.6;78947 +2001-10-08_04:00:00;7.8;78947 +2001-10-08_05:00:00;7.9;78947 +2001-10-08_06:00:00;7.1;78947 +2001-10-08_07:00:00;6.8;78947 +2001-10-08_08:00:00;7.1;78947 +2001-10-08_09:00:00;8.2;78947 +2001-10-08_10:00:00;9.3;78947 +2001-10-08_11:00:00;10;78947 +2001-10-08_12:00:00;9.8;78947 +2001-10-08_13:00:00;10.2;78947 +2001-10-08_14:00:00;10.2;78947 +2001-10-08_15:00:00;10;78947 +2001-10-08_16:00:00;9.6;78947 +2001-10-08_17:00:00;8.5;78947 +2001-10-08_18:00:00;5.9;78947 +2001-10-08_19:00:00;4.4;78947 +2001-10-08_20:00:00;3.5;78947 +2001-10-08_21:00:00;3.6;78947 +2001-10-08_22:00:00;4.6;78947 +2001-10-08_23:00:00;6.2;78947 +2001-10-09_00:00:00;8;78947 +2001-10-09_01:00:00;8.8;78947 +2001-10-09_02:00:00;8.9;78947 +2001-10-09_03:00:00;8.8;78947 +2001-10-09_04:00:00;8.9;78947 +2001-10-09_05:00:00;8.9;78947 +2001-10-09_06:00:00;9.6;78947 +2001-10-09_07:00:00;9.5;78947 +2001-10-09_08:00:00;9.1;78947 +2001-10-09_09:00:00;9.2;78947 +2001-10-09_10:00:00;9.4;78947 +2001-10-09_11:00:00;9.7;78947 +2001-10-09_12:00:00;9.7;78947 +2001-10-09_13:00:00;10.5;78947 +2001-10-09_14:00:00;11.2;78947 +2001-10-09_15:00:00;10.2;78947 +2001-10-09_16:00:00;9.9;78947 +2001-10-09_17:00:00;10.2;78947 +2001-10-09_18:00:00;10.8;78947 +2001-10-09_19:00:00;11.1;78947 +2001-10-09_20:00:00;10.9;78947 +2001-10-09_21:00:00;10.6;78947 +2001-10-09_22:00:00;10.4;78947 +2001-10-09_23:00:00;10.2;78947 +2001-10-10_00:00:00;9.8;78947 +2001-10-10_01:00:00;9.9;78947 +2001-10-10_02:00:00;9.9;78947 +2001-10-10_03:00:00;9.9;78947 +2001-10-10_04:00:00;10;78947 +2001-10-10_05:00:00;10.2;78947 +2001-10-10_06:00:00;10.2;78947 +2001-10-10_07:00:00;10.3;78947 +2001-10-10_08:00:00;10;78947 +2001-10-10_09:00:00;9.7;78947 +2001-10-10_10:00:00;10.7;78947 +2001-10-10_11:00:00;10.2;78947 +2001-10-10_12:00:00;10.5;78947 +2001-10-10_13:00:00;10.4;78947 +2001-10-10_14:00:00;10.9;78947 +2001-10-10_15:00:00;11;78947 +2001-10-10_16:00:00;10.4;78947 +2001-10-10_17:00:00;10.1;78947 +2001-10-10_18:00:00;9.5;78947 +2001-10-10_19:00:00;9.5;78947 +2001-10-10_20:00:00;9.7;78947 +2001-10-10_21:00:00;9.7;78947 +2001-10-10_22:00:00;9.8;78947 +2001-10-10_23:00:00;9.8;78947 +2001-10-11_00:00:00;9.6;78947 +2001-10-11_01:00:00;9.3;78947 
+2001-10-11_02:00:00;9.7;78947 +2001-10-11_03:00:00;10;78947 +2001-10-11_04:00:00;9.8;78947 +2001-10-11_05:00:00;9.6;78947 +2001-10-11_06:00:00;9.2;78947 +2001-10-11_07:00:00;9.2;78947 +2001-10-11_08:00:00;9.4;78947 +2001-10-11_09:00:00;10.3;78947 +2001-10-11_10:00:00;9.6;78947 +2001-10-11_11:00:00;10.3;78947 +2001-10-11_12:00:00;10.9;78947 +2001-10-11_13:00:00;11;78947 +2001-10-11_14:00:00;10.7;78947 +2001-10-11_15:00:00;8.7;78947 +2001-10-11_16:00:00;8.7;78947 +2001-10-11_17:00:00;8.8;78947 +2001-10-11_18:00:00;8.2;78947 +2001-10-11_19:00:00;8.6;78947 +2001-10-11_20:00:00;8.7;78947 +2001-10-11_21:00:00;9.2;78947 +2001-10-11_22:00:00;9.3;78947 +2001-10-11_23:00:00;9.1;78947 +2001-10-12_00:00:00;9.3;78947 +2001-10-12_01:00:00;9.4;78947 +2001-10-12_02:00:00;9.6;78947 +2001-10-12_03:00:00;9.7;78947 +2001-10-12_04:00:00;9.6;78947 +2001-10-12_05:00:00;9.3;78947 +2001-10-12_06:00:00;9.1;78947 +2001-10-12_07:00:00;9.2;78947 +2001-10-12_08:00:00;9.3;78947 +2001-10-12_11:00:00;9.5;78947 +2001-10-12_12:00:00;10.6;78947 +2001-10-12_13:00:00;10.5;78947 +2001-10-12_14:00:00;11;78947 +2001-10-12_15:00:00;10.6;78947 +2001-10-12_16:00:00;10.1;78947 +2001-10-12_17:00:00;9.8;78947 +2001-10-12_18:00:00;8.7;78947 +2001-10-12_19:00:00;8.4;78947 +2001-10-12_20:00:00;8;78947 +2001-10-12_21:00:00;7.8;78947 +2001-10-12_22:00:00;7.3;78947 +2001-10-12_23:00:00;6.8;78947 +2001-10-13_00:00:00;6.4;78947 +2001-10-13_01:00:00;6.1;78947 +2001-10-13_02:00:00;5.5;78947 +2001-10-13_03:00:00;4.6;78947 +2001-10-13_04:00:00;3.5;78947 +2001-10-13_05:00:00;2.7;78947 +2001-10-13_06:00:00;2.2;78947 +2001-10-13_07:00:00;2;78947 +2001-10-13_09:00:00;6.4;78947 +2001-10-13_10:00:00;9.4;78947 +2001-10-13_11:00:00;10.5;78947 +2001-10-13_12:00:00;10.3;78947 +2001-10-13_13:00:00;10.4;78947 +2001-10-13_14:00:00;10.3;78947 +2001-10-13_15:00:00;9.9;78947 +2001-10-13_16:00:00;8.7;78947 +2001-10-13_17:00:00;6.7;78947 +2001-10-13_18:00:00;5.2;78947 +2001-10-13_19:00:00;4.6;78947 +2001-10-13_20:00:00;4.7;78947 +2001-10-13_21:00:00;4.7;78947 +2001-10-13_22:00:00;4.7;78947 +2001-10-13_23:00:00;5.4;78947 +2001-10-14_00:00:00;6.1;78947 +2001-10-14_01:00:00;6.7;78947 +2001-10-14_02:00:00;7.6;78947 +2001-10-14_03:00:00;8.1;78947 +2001-10-14_04:00:00;8.3;78947 +2001-10-14_06:00:00;9.5;78947 +2001-10-14_07:00:00;9.6;78947 +2001-10-14_09:00:00;9.9;78947 +2001-10-14_10:00:00;9.9;78947 +2001-10-14_11:00:00;10.3;78947 +2001-10-14_12:00:00;11.2;78947 +2001-10-14_13:00:00;11.1;78947 +2001-10-14_14:00:00;11.2;78947 +2001-10-14_15:00:00;11.3;78947 +2001-10-14_16:00:00;11.1;78947 +2001-10-14_17:00:00;10.4;78947 +2001-10-14_18:00:00;10.2;78947 +2001-10-14_19:00:00;10.3;78947 +2001-10-14_20:00:00;9.8;78947 +2001-10-14_21:00:00;9.6;78947 +2001-10-14_22:00:00;9.6;78947 +2001-10-14_23:00:00;9.8;78947 +2001-10-15_00:00:00;9.9;78947 +2001-10-15_01:00:00;10.3;78947 +2001-10-15_02:00:00;10.4;78947 +2001-10-15_03:00:00;10.5;78947 +2001-10-15_04:00:00;10.6;78947 +2001-10-15_05:00:00;10.8;78947 +2001-10-15_06:00:00;9.7;78947 +2001-10-15_07:00:00;9.9;78947 +2001-10-15_08:00:00;10.4;78947 +2001-10-15_09:00:00;10.6;78947 +2001-10-15_10:00:00;11;78947 +2001-10-15_11:00:00;11.1;78947 +2001-10-15_12:00:00;12.1;78947 +2001-10-15_13:00:00;11.1;78947 +2001-10-15_14:00:00;11.3;78947 +2001-10-15_15:00:00;11;78947 +2001-10-15_16:00:00;10.5;78947 +2001-10-15_17:00:00;10.3;78947 +2001-10-15_18:00:00;10.4;78947 +2001-10-15_19:00:00;10.1;78947 +2001-10-15_20:00:00;10.2;78947 +2001-10-15_21:00:00;10.1;78947 +2001-10-15_22:00:00;9.9;78947 +2001-10-15_23:00:00;10.1;78947 
+2001-10-16_00:00:00;9.8;78947 +2001-10-16_01:00:00;9.8;78947 +2001-10-16_02:00:00;9.9;78947 +2001-10-16_03:00:00;9.9;78947 +2001-10-16_04:00:00;10.1;78947 +2001-10-16_05:00:00;10.3;78947 +2001-10-16_06:00:00;10.4;78947 +2001-10-16_07:00:00;10.4;78947 +2001-10-16_08:00:00;10.6;78947 +2001-10-16_09:00:00;11;78947 +2001-10-16_10:00:00;11.5;78947 +2001-10-16_11:00:00;11.7;78947 +2001-10-16_12:00:00;11.6;78947 +2001-10-16_13:00:00;11.5;78947 +2001-10-16_14:00:00;11.3;78947 +2001-10-16_15:00:00;11.1;78947 +2001-10-16_16:00:00;11.1;78947 +2001-10-16_17:00:00;11.1;78947 +2001-10-16_18:00:00;10.8;78947 +2001-10-16_19:00:00;11;78947 +2001-10-16_20:00:00;11.2;78947 +2001-10-16_21:00:00;11.3;78947 +2001-10-16_22:00:00;11.4;78947 +2001-10-16_23:00:00;11.4;78947 +2001-10-17_00:00:00;10.8;78947 +2001-10-17_01:00:00;10.5;78947 +2001-10-17_02:00:00;10.9;78947 +2001-10-17_03:00:00;11.1;78947 +2001-10-17_04:00:00;11.1;78947 +2001-10-17_05:00:00;10.7;78947 +2001-10-17_06:00:00;10.7;78947 +2001-10-17_07:00:00;10.5;78947 +2001-10-17_09:00:00;11.8;78947 +2001-10-17_10:00:00;11.5;78947 +2001-10-17_11:00:00;11.5;78947 +2001-10-17_12:00:00;11.4;78947 +2001-10-17_13:00:00;11.3;78947 +2001-10-17_14:00:00;11.1;78947 +2001-10-17_15:00:00;11;78947 +2001-10-17_16:00:00;10.8;78947 +2001-10-17_17:00:00;10.8;78947 +2001-10-17_18:00:00;10.5;78947 +2001-10-17_19:00:00;10.6;78947 +2001-10-17_20:00:00;10.7;78947 +2001-10-17_21:00:00;10.5;78947 +2001-10-17_22:00:00;10.6;78947 +2001-10-17_23:00:00;11.1;78947 +2001-10-18_00:00:00;8.5;78947 +2001-10-18_01:00:00;6.8;78947 +2001-10-18_02:00:00;7.5;78947 +2001-10-18_03:00:00;8.7;78947 +2001-10-18_04:00:00;8.6;78947 +2001-10-18_05:00:00;8.7;78947 +2001-10-18_06:00:00;8.5;78947 +2001-10-18_07:00:00;8.5;78947 +2001-10-18_09:00:00;9.5;78947 +2001-10-18_10:00:00;9.5;78947 +2001-10-18_11:00:00;9.9;78947 +2001-10-18_12:00:00;9.7;78947 +2001-10-18_13:00:00;9.1;78947 +2001-10-18_14:00:00;9.3;78947 +2001-10-18_15:00:00;9;78947 +2001-10-18_16:00:00;8.5;78947 +2001-10-18_17:00:00;8.3;78947 +2001-10-18_18:00:00;8.2;78947 +2001-10-18_19:00:00;8.3;78947 +2001-10-18_20:00:00;8.1;78947 +2001-10-18_21:00:00;7.6;78947 +2001-10-18_22:00:00;6.2;78947 +2001-10-18_23:00:00;6.7;78947 +2001-10-19_00:00:00;7.4;78947 +2001-10-19_01:00:00;7.1;78947 +2001-10-19_02:00:00;7.8;78947 +2001-10-19_03:00:00;8;78947 +2001-10-19_04:00:00;7.6;78947 +2001-10-19_05:00:00;6.3;78947 +2001-10-19_06:00:00;7.2;78947 +2001-10-19_07:00:00;8;78947 +2001-10-19_09:00:00;7.6;78947 +2001-10-19_10:00:00;7.6;78947 +2001-10-19_11:00:00;7.6;78947 +2001-10-19_12:00:00;6.9;78947 +2001-10-19_13:00:00;6.8;78947 +2001-10-19_14:00:00;6.6;78947 +2001-10-19_15:00:00;6.4;78947 +2001-10-19_16:00:00;6.3;78947 +2001-10-19_17:00:00;6.2;78947 +2001-10-19_18:00:00;6.9;78947 +2001-10-19_19:00:00;6.6;78947 +2001-10-19_20:00:00;6.4;78947 +2001-10-19_21:00:00;6;78947 +2001-10-19_22:00:00;5.9;78947 +2001-10-19_23:00:00;5.7;78947 +2001-10-20_00:00:00;6.1;78947 +2001-10-20_01:00:00;6.2;78947 +2001-10-20_02:00:00;7.1;78947 +2001-10-20_03:00:00;6.4;78947 +2001-10-20_04:00:00;6.3;78947 +2001-10-20_05:00:00;6.6;78947 +2001-10-20_06:00:00;7;78947 +2001-10-20_07:00:00;6.1;78947 +2001-10-20_09:00:00;5.6;78947 +2001-10-20_10:00:00;6.1;78947 +2001-10-20_11:00:00;6.6;78947 +2001-10-20_12:00:00;6.8;78947 +2001-10-20_13:00:00;6.4;78947 +2001-10-20_14:00:00;6.7;78947 +2001-10-20_15:00:00;6.2;78947 +2001-10-20_16:00:00;5.7;78947 +2001-10-20_17:00:00;5.2;78947 +2001-10-20_18:00:00;4;78947 +2001-10-20_19:00:00;3.1;78947 +2001-10-20_20:00:00;3.1;78947 
+2001-10-20_21:00:00;3.1;78947 +2001-10-20_22:00:00;2.1;78947 +2001-10-20_23:00:00;2;78947 +2001-10-21_00:00:00;2.2;78947 +2001-10-21_01:00:00;3.7;78947 +2001-10-21_02:00:00;5.1;78947 +2001-10-21_03:00:00;6;78947 +2001-10-21_04:00:00;6.4;78947 +2001-10-21_05:00:00;6.7;78947 +2001-10-21_06:00:00;6.9;78947 +2001-10-21_07:00:00;7.3;78947 diff --git a/migrations/tests/files/histkvalobs/data/18700/18700_313_509_0_0.csv b/migrations/tests/files/histkvalobs/data/18700/18700_313_509_0_0.csv new file mode 100644 index 00000000..e74eebd9 --- /dev/null +++ b/migrations/tests/files/histkvalobs/data/18700/18700_313_509_0_0.csv @@ -0,0 +1,41 @@ +39 +Obstime,Original,Tbtime,Corrected,Controlinfo,Useinfo,Cfailed +2024-01-04T07:00:00Z,1171,2024-01-04T07:12:11.191105Z,1171,0100000000000000,7000000000000000, +2024-01-08T16:30:00Z,9528,2024-01-08T16:42:13.921859Z,9528,0100000000000000,7000000000000000, +2024-01-08T16:50:00Z,9376,2024-01-08T17:02:19.556872Z,9376,0100000000000000,7000000000000000, +2024-01-08T17:00:00Z,9301,2024-01-08T17:12:16.843635Z,9301,0100000000000000,7000000000000000, +2024-01-08T20:40:00Z,10889,2024-01-08T20:52:15.05597Z,10889,0100000000000000,7000000000000000, +2024-01-08T21:50:00Z,7367,2024-01-08T22:02:26.760424Z,7367,0100000000000000,7000000000000000, +2024-01-08T23:20:00Z,7908,2024-01-08T23:32:15.434304Z,7908,0100000000000000,7000000000000000, +2024-01-10T22:30:00Z,6543,2024-01-10T22:42:15.063955Z,6543,0100000000000000,7000000000000000, +2024-01-11T00:40:00Z,6010,2024-01-11T00:52:22.932791Z,6010,0100000000000000,7000000000000000, +2024-01-11T05:10:00Z,5268,2024-01-11T05:22:19.912004Z,5268,0100000000000000,7000000000000000, +2024-01-11T05:20:00Z,4919,2024-01-11T05:32:26.979669Z,4919,0100000000000000,7000000000000000, +2024-01-11T06:20:00Z,4835,2024-01-11T06:32:10.06531Z,4835,0100000000000000,7000000000000000, +2024-01-11T06:30:00Z,4835,2024-01-11T06:42:07.940127Z,4835,0100000000000000,7000000000000000, +2024-01-11T07:40:00Z,5149,2024-01-11T07:52:14.686664Z,5149,0100000000000000,7000000000000000, +2024-01-11T07:50:00Z,5801,2024-01-11T08:02:15.732653Z,5801,0100000000000000,7000000000000000, +2024-01-11T09:20:00Z,10185,2024-01-11T09:32:13.684992Z,10185,0100000000000000,7000000000000000, +2024-01-12T05:20:00Z,10150,2024-01-12T05:32:16.820627Z,10150,0100000000000000,7000000000000000, +2024-01-13T16:30:00Z,4715,2024-01-13T16:42:24.567189Z,4715,0100000000000000,7000000000000000, +2024-01-13T17:40:00Z,4265,2024-01-13T17:52:11.305075Z,4265,0100000000000000,7000000000000000, +2024-01-13T21:20:00Z,5629,2024-01-13T21:32:03.973429Z,5629,0100000000000000,7000000000000000, +2024-01-14T05:10:00Z,10235,2024-01-14T05:22:03.476192Z,10235,0100000000000000,7000000000000000, +2024-01-14T10:30:00Z,5854,2024-01-14T10:42:15.27465Z,5854,0100000000000000,7000000000000000, +2024-01-17T15:30:00Z,2977,2024-01-17T15:42:15.825088Z,2977,0100000000000000,7000000000000000, +2024-01-17T17:30:00Z,6034,2024-01-17T17:42:18.612787Z,6034,0100000000000000,7000000000000000, +2024-01-17T18:30:00Z,4655,2024-01-17T18:42:08.11493Z,4655,0100000000000000,7000000000000000, +2024-01-17T19:50:00Z,5360,2024-01-17T20:02:16.423065Z,5360,0100000000000000,7000000000000000, +2024-01-23T07:40:00Z,7038,2024-01-23T07:52:11.7033Z,7038,0100000000000000,7000000000000000, +2024-01-23T07:50:00Z,7023,2024-01-23T08:02:31.336199Z,7023,0100000000000000,7000000000000000, +2024-01-24T21:10:00Z,2932,2024-01-24T21:22:23.562398Z,2932,0100000000000000,7000000000000000, 
+2024-01-24T23:40:00Z,7247,2024-01-24T23:52:20.630463Z,7247,0100000000000000,7000000000000000, +2024-01-25T21:50:00Z,7787,2024-01-25T22:02:25.463753Z,7787,0100000000000000,7000000000000000, +2024-01-26T01:10:00Z,4310,2024-01-26T01:22:21.581437Z,4310,0100000000000000,7000000000000000, +2024-01-28T03:40:00Z,7203,2024-01-28T03:52:21.966694Z,7203,0100000000000000,7000000000000000, +2024-01-28T05:00:00Z,7427,2024-01-28T05:12:12.188434Z,7427,0100000000000000,7000000000000000, +2024-01-28T06:10:00Z,7188,2024-01-28T06:22:13.529801Z,7188,0100000000000000,7000000000000000, +2024-01-29T23:00:00Z,6798,2024-01-29T23:12:22.423795Z,6798,0100000000000000,7000000000000000, +2024-01-30T17:20:00Z,2408,2024-01-30T17:32:10.520612Z,2408,0100000000000000,7000000000000000, +2024-01-30T17:40:00Z,6124,2024-01-30T17:52:16.466449Z,6124,0100000000000000,7000000000000000, +2024-01-31T12:00:00Z,5030,2024-01-31T12:12:20.905963Z,5030,0100000000000000,7000000000000000, diff --git a/migrations/tests/files/histkvalobs/text/18700/18700_1000_316__.csv b/migrations/tests/files/histkvalobs/text/18700/18700_1000_316__.csv new file mode 100644 index 00000000..5b28d347 --- /dev/null +++ b/migrations/tests/files/histkvalobs/text/18700/18700_1000_316__.csv @@ -0,0 +1,184 @@ +182 +Obstime,Original,Tbtime +2024-01-01T06:00:00Z,va,2024-01-01T06:08:24.240635Z +2024-01-01T09:00:00Z,va,2024-01-01T09:10:50.1473Z +2024-01-01T12:00:00Z,va,2024-01-01T12:08:01.342058Z +2024-01-01T15:00:00Z,va,2024-01-01T15:05:18.95104Z +2024-01-01T18:00:00Z,va,2024-01-01T18:30:11.257833Z +2024-01-01T21:00:00Z,va,2024-01-01T20:55:31.867204Z +2024-01-02T06:00:00Z,SC,2024-01-02T06:04:05.169123Z +2024-01-02T09:00:00Z,SC,2024-01-02T08:47:34.474338Z +2024-01-02T12:00:00Z,SC,2024-01-02T12:13:07.648614Z +2024-01-02T15:00:00Z,va,2024-01-02T15:02:44.39202Z +2024-01-02T18:00:00Z,va,2024-01-02T18:02:59.425499Z +2024-01-02T21:00:00Z,va,2024-01-02T20:58:54.74345Z +2024-01-03T06:00:00Z,va,2024-01-03T06:20:35.275366Z +2024-01-03T09:00:00Z,va,2024-01-03T08:52:25.111242Z +2024-01-03T12:00:00Z,va,2024-01-03T11:51:31.620272Z +2024-01-03T15:00:00Z,va,2024-01-03T14:57:27.552375Z +2024-01-03T18:00:00Z,va,2024-01-03T17:32:50.639057Z +2024-01-03T21:00:00Z,va,2024-01-03T20:22:47.873367Z +2024-01-04T06:00:00Z,va,2024-01-04T06:02:14.54783Z +2024-01-04T09:00:00Z,va,2024-01-04T09:38:35.151297Z +2024-01-04T12:00:00Z,va,2024-01-04T11:58:39.210352Z +2024-01-04T15:00:00Z,va,2024-01-04T14:36:28.976216Z +2024-01-04T18:00:00Z,va,2024-01-04T17:41:29.45406Z +2024-01-04T21:00:00Z,va,2024-01-04T20:35:33.725461Z +2024-01-05T06:00:00Z,va,2024-01-05T06:14:54.41311Z +2024-01-05T09:00:00Z,va,2024-01-05T08:44:30.531347Z +2024-01-05T12:00:00Z,va,2024-01-05T11:51:51.93322Z +2024-01-05T15:00:00Z,va,2024-01-05T15:22:36.008918Z +2024-01-05T18:00:00Z,va,2024-01-05T17:39:49.191184Z +2024-01-05T21:00:00Z,va,2024-01-05T20:45:33.672168Z +2024-01-06T06:00:00Z,SC,2024-01-06T06:02:26.889775Z +2024-01-06T09:00:00Z,SC,2024-01-06T08:52:37.760554Z +2024-01-06T12:00:00Z,SC,2024-01-06T12:04:39.104056Z +2024-01-06T15:00:00Z,va,2024-01-06T14:51:41.02775Z +2024-01-06T18:00:00Z,va,2024-01-06T17:55:10.729851Z +2024-01-06T21:00:00Z,va,2024-01-06T20:45:06.338881Z +2024-01-07T06:00:00Z,SC,2024-01-07T06:02:23.831664Z +2024-01-07T09:00:00Z,SC,2024-01-07T09:00:16.907367Z +2024-01-07T12:00:00Z,SC,2024-01-07T12:00:36.88099Z +2024-01-07T15:00:00Z,va,2024-01-07T14:59:34.897702Z +2024-01-07T18:00:00Z,va,2024-01-07T18:11:41.549957Z +2024-01-07T21:00:00Z,va,2024-01-07T20:41:58.506384Z +2024-01-08T06:00:00Z,va,2024-01-08T06:01:01.372157Z 
+2024-01-08T09:00:00Z,va,2024-01-08T09:12:12.454335Z +2024-01-08T12:00:00Z,va,2024-01-08T12:15:47.63165Z +2024-01-08T15:00:00Z,va,2024-01-08T15:18:49.539079Z +2024-01-08T18:00:00Z,va,2024-01-08T19:42:54.296054Z +2024-01-08T21:00:00Z,va,2024-01-08T21:01:46.002814Z +2024-01-09T06:00:00Z,va,2024-01-09T05:58:14.305347Z +2024-01-09T09:00:00Z,va,2024-01-09T11:57:37.327976Z +2024-01-09T12:00:00Z,va,2024-01-09T11:59:47.53963Z +2024-01-09T15:00:00Z,SC,2024-01-09T14:50:23.998702Z +2024-01-09T18:00:00Z,SC,2024-01-09T17:51:45.133164Z +2024-01-09T21:00:00Z,SC,2024-01-09T20:53:42.77469Z +2024-01-10T06:00:00Z,va,2024-01-10T06:05:26.787634Z +2024-01-10T09:00:00Z,va,2024-01-10T09:08:11.442462Z +2024-01-10T12:00:00Z,va,2024-01-10T11:44:16.870721Z +2024-01-10T15:00:00Z,va,2024-01-11T08:41:11.583158Z +2024-01-10T18:00:00Z,va,2024-01-10T17:35:18.484989Z +2024-01-10T21:00:00Z,va,2024-01-10T20:41:50.248167Z +2024-01-11T06:00:00Z,SC,2024-01-11T06:04:20.161775Z +2024-01-11T09:00:00Z,SC,2024-01-11T08:59:27.063146Z +2024-01-11T12:00:00Z,SC,2024-01-11T11:49:34.889925Z +2024-01-11T15:00:00Z,JB,2024-01-11T14:36:50.081976Z +2024-01-11T18:00:00Z,JB,2024-01-11T17:43:17.266704Z +2024-01-11T21:00:00Z,JB,2024-01-11T20:40:41.69956Z +2024-01-12T06:00:00Z,va,2024-01-12T06:11:42.147972Z +2024-01-12T09:00:00Z,va,2024-01-12T09:26:38.134323Z +2024-01-12T12:00:00Z,va,2024-01-12T11:54:34.64495Z +2024-01-12T15:00:00Z,va,2024-01-12T14:31:53.355821Z +2024-01-12T18:00:00Z,va,2024-01-12T17:38:16.986212Z +2024-01-12T21:00:00Z,va,2024-01-12T20:39:25.87586Z +2024-01-13T06:00:00Z,va,2024-01-13T05:58:53.380086Z +2024-01-13T09:00:00Z,va,2024-01-13T08:58:14.543081Z +2024-01-13T12:00:00Z,va,2024-01-13T12:05:19.654932Z +2024-01-13T15:00:00Z,va,2024-01-13T14:51:45.819942Z +2024-01-13T18:00:00Z,va,2024-01-13T17:57:06.844951Z +2024-01-13T21:00:00Z,va,2024-01-13T20:48:20.136926Z +2024-01-14T06:00:00Z,va,2024-01-14T05:56:41.684562Z +2024-01-14T09:00:00Z,va,2024-01-14T08:54:13.553349Z +2024-01-14T12:00:00Z,va,2024-01-14T12:06:54.114354Z +2024-01-14T15:00:00Z,va,2024-01-14T14:51:24.397173Z +2024-01-14T18:00:00Z,va,2024-01-14T17:44:34.449537Z +2024-01-14T21:00:00Z,va,2024-01-14T21:07:00.418857Z +2024-01-15T06:00:00Z,va,2024-01-15T06:21:56.238594Z +2024-01-15T09:00:00Z,va,2024-01-15T09:21:49.666277Z +2024-01-15T12:00:00Z,va,2024-01-15T11:49:46.404973Z +2024-01-15T15:00:00Z,va,2024-01-15T15:03:35.548944Z +2024-01-15T18:00:00Z,va,2024-01-15T18:13:12.872413Z +2024-01-15T21:00:00Z,va,2024-01-15T21:04:01.093129Z +2024-01-16T06:00:00Z,SC,2024-01-16T06:02:29.196795Z +2024-01-16T09:00:00Z,SC,2024-01-16T08:59:58.937917Z +2024-01-16T12:00:00Z,SC,2024-01-16T11:53:13.700814Z +2024-01-16T15:00:00Z,va,2024-01-16T14:58:22.239067Z +2024-01-16T18:00:00Z,va,2024-01-16T18:13:01.354125Z +2024-01-16T21:00:00Z,va,2024-01-16T20:53:10.687428Z +2024-01-17T06:00:00Z,SC,2024-01-17T05:50:10.510894Z +2024-01-17T09:00:00Z,SC,2024-01-17T08:52:35.553462Z +2024-01-17T12:00:00Z,SC,2024-01-17T11:44:37.594396Z +2024-01-17T15:00:00Z,va,2024-01-17T14:54:25.938316Z +2024-01-17T18:00:00Z,va,2024-01-17T17:51:49.384976Z +2024-01-17T21:00:00Z,va,2024-01-17T20:37:27.05038Z +2024-01-18T06:00:00Z,va,2024-01-18T06:00:11.917462Z +2024-01-18T09:00:00Z,va,2024-01-18T09:10:18.629467Z +2024-01-18T12:00:00Z,va,2024-01-18T12:07:40.456704Z +2024-01-18T15:00:00Z,va,2024-01-18T14:46:35.571809Z +2024-01-18T18:00:00Z,va,2024-01-18T17:52:16.032836Z +2024-01-19T06:00:00Z,SC,2024-01-19T06:14:46.595777Z +2024-01-19T09:00:00Z,SC,2024-01-19T09:06:55.85563Z +2024-01-19T12:00:00Z,SC,2024-01-19T11:56:59.340324Z 
+2024-01-19T15:00:00Z,bw,2024-01-19T14:51:47.71753Z +2024-01-19T18:00:00Z,va,2024-01-19T17:39:43.99902Z +2024-01-19T21:00:00Z,va,2024-01-19T20:44:17.031778Z +2024-01-20T06:00:00Z,BW,2024-01-20T05:26:01.293989Z +2024-01-20T09:00:00Z,bw,2024-01-20T08:47:24.672509Z +2024-01-20T12:00:00Z,BW,2024-01-20T11:47:46.011987Z +2024-01-20T15:00:00Z,va,2024-01-20T14:25:38.98911Z +2024-01-20T18:00:00Z,va,2024-01-20T18:58:19.481547Z +2024-01-20T21:00:00Z,va,2024-01-20T20:59:04.386123Z +2024-01-21T06:00:00Z,va,2024-01-21T09:17:45.251403Z +2024-01-21T09:00:00Z,va,2024-01-21T09:20:25.583798Z +2024-01-21T12:00:00Z,va,2024-01-21T12:04:34.520547Z +2024-01-21T15:00:00Z,va,2024-01-21T14:57:45.720579Z +2024-01-21T18:00:00Z,va,2024-01-21T17:53:16.434501Z +2024-01-21T21:00:00Z,va,2024-01-21T20:55:08.020237Z +2024-01-22T06:00:00Z,va,2024-01-22T06:49:17.410717Z +2024-01-22T09:00:00Z,va,2024-01-22T08:43:53.580873Z +2024-01-22T12:00:00Z,va,2024-01-22T12:02:25.689051Z +2024-01-22T15:00:00Z,SC,2024-01-22T14:57:12.17922Z +2024-01-22T18:00:00Z,SC,2024-01-22T18:02:32.378874Z +2024-01-22T21:00:00Z,SC,2024-01-22T20:47:33.700204Z +2024-01-23T06:00:00Z,va,2024-01-23T06:06:40.003847Z +2024-01-23T09:00:00Z,va,2024-01-23T09:44:54.914519Z +2024-01-23T12:00:00Z,va,2024-01-23T11:26:22.815102Z +2024-01-23T15:00:00Z,jih,2024-01-23T14:49:07.301263Z +2024-01-23T18:00:00Z,jih,2024-01-23T20:52:21.247563Z +2024-01-23T21:00:00Z,jih,2024-01-23T20:51:45.070332Z +2024-01-24T06:00:00Z,va,2024-01-24T06:21:48.032913Z +2024-01-24T09:00:00Z,va,2024-01-24T08:56:18.676311Z +2024-01-24T12:00:00Z,va,2024-01-24T11:59:30.47087Z +2024-01-24T15:00:00Z,mg,2024-01-24T15:32:33.55375Z +2024-01-24T18:00:00Z,mg,2024-01-24T17:56:06.438737Z +2024-01-24T21:00:00Z,mg,2024-01-24T20:42:05.766951Z +2024-01-25T06:00:00Z,SC,2024-01-25T06:01:42.014859Z +2024-01-25T09:00:00Z,SC,2024-01-25T08:52:18.318853Z +2024-01-25T12:00:00Z,SC,2024-01-25T12:02:59.837605Z +2024-01-25T15:00:00Z,VA,2024-01-25T14:49:11.476807Z +2024-01-26T06:00:00Z,va,2024-01-26T05:55:15.039851Z +2024-01-26T09:00:00Z,va,2024-01-26T09:04:00.737621Z +2024-01-26T12:00:00Z,va,2024-01-26T11:49:17.932132Z +2024-01-26T15:00:00Z,va,2024-01-26T14:53:41.582922Z +2024-01-26T18:00:00Z,va,2024-01-26T17:39:36.184963Z +2024-01-26T21:00:00Z,va,2024-01-26T20:40:10.970383Z +2024-01-27T06:00:00Z,SC,2024-01-27T06:02:23.021561Z +2024-01-27T09:00:00Z,SC,2024-01-27T08:58:11.570132Z +2024-01-27T12:00:00Z,SC,2024-01-27T11:42:35.473277Z +2024-01-27T15:00:00Z,mg,2024-01-27T15:00:17.328911Z +2024-01-27T18:00:00Z,mg,2024-01-27T17:58:20.247898Z +2024-01-27T21:00:00Z,mg,2024-01-27T20:43:19.447566Z +2024-01-28T06:00:00Z,SC,2024-01-28T06:02:23.33471Z +2024-01-28T09:00:00Z,SC,2024-01-28T08:54:38.591659Z +2024-01-28T12:00:00Z,SC,2024-01-28T11:58:44.609364Z +2024-01-28T15:00:00Z,mg,2024-01-28T15:34:37.199962Z +2024-01-28T18:00:00Z,mg,2024-01-28T18:01:27.087291Z +2024-01-28T21:00:00Z,mg,2024-01-28T20:49:38.070406Z +2024-01-29T06:00:00Z,va,2024-01-29T06:17:51.260721Z +2024-01-29T09:00:00Z,va,2024-01-29T09:17:11.929249Z +2024-01-29T12:00:00Z,va,2024-01-29T13:00:13.966988Z +2024-01-29T15:00:00Z,SC,2024-01-29T14:55:35.018886Z +2024-01-29T18:00:00Z,SC,2024-01-29T18:01:34.059239Z +2024-01-29T21:00:00Z,SC,2024-01-29T20:52:07.216667Z +2024-01-30T06:00:00Z,va,2024-01-30T05:54:41.193538Z +2024-01-30T09:00:00Z,va,2024-01-30T09:02:14.035271Z +2024-01-30T12:00:00Z,va,2024-01-30T11:48:50.918808Z +2024-01-30T15:00:00Z,va,2024-01-30T14:59:36.664281Z +2024-01-30T18:00:00Z,va,2024-01-30T18:16:24.86856Z +2024-01-30T21:00:00Z,va,2024-01-30T20:51:28.917896Z 
+2024-01-31T06:00:00Z,SC,2024-01-31T06:03:00.208048Z +2024-01-31T09:00:00Z,SC,2024-01-31T09:10:06.967889Z +2024-01-31T12:00:00Z,SC,2024-01-31T11:58:16.524756Z +2024-01-31T18:00:00Z,mg,2024-01-31T18:17:04.641392Z +2024-01-31T21:00:00Z,mg,2024-01-31T20:49:57.68738Z diff --git a/migrations/kdvh_test.go b/migrations/tests/kdvh_test.go similarity index 100% rename from migrations/kdvh_test.go rename to migrations/tests/kdvh_test.go diff --git a/migrations/kvalobs_test.go b/migrations/tests/kvalobs_test.go similarity index 53% rename from migrations/kvalobs_test.go rename to migrations/tests/kvalobs_test.go index ee0c01ee..1d3cf3aa 100644 --- a/migrations/kvalobs_test.go +++ b/migrations/tests/kvalobs_test.go @@ -61,8 +61,7 @@ func TestImportDataKvalobs(t *testing.T) { _, histkvalobs := db.InitDBs() cases := []KvalobsDataCase{ - DataCase(KvalobsTestCase{db: histkvalobs, station: 18700, permit: 1, expectedRows: 100}), - // DataCase(KvalobsTestCase{db: histkvalobs, station: 18700, permit: 0, expectedRows: 100}), + DataCase(KvalobsTestCase{db: histkvalobs, station: 18700, paramid: 313, permit: 1, expectedRows: 39}), } for _, c := range cases { @@ -73,47 +72,47 @@ func TestImportDataKvalobs(t *testing.T) { case err != nil: t.Fatal(err) case insertedRows != c.expectedRows: - t.Log(insertedRows) - // t.Fail() + // t.Log(insertedRows) + t.Fail() } } } -// type KvalobsTextCase struct { -// KvalobsTestCase -// table db.TextTable -// } -// -// func TextCase(ktc KvalobsTestCase) KvalobsTextCase { -// path := filepath.Join(DUMPS_PATH, ktc.db.Name) -// return KvalobsTextCase{ktc, port.TextTable(path)} -// } -// -// func TestImportTextKvalobs(t *testing.T) { -// log.SetFlags(log.LstdFlags | log.Lshortfile) -// -// pool, err := pgxpool.New(context.TODO(), LARD_STRING) -// if err != nil { -// t.Log("Could not connect to Lard:", err) -// } -// defer pool.Close() -// -// kvalobs, histkvalobs := db.InitDBs() -// -// cases := []KvalobsTextCase{ -// TextCase(KvalobsTestCase{db: kvalobs, station: 18700, paramid: 212, permit: 0, expectedRows: 100}), -// TextCase(KvalobsTestCase{db: histkvalobs, station: 18700, paramid: 212, permit: 0, expectedRows: 100}), -// } -// -// for _, c := range cases { -// config, permits := c.mockConfig() -// insertedRows, err := port.ImportTable(c.table, permits, pool, config) -// -// switch { -// case err != nil: -// t.Fatal(err) -// case insertedRows != c.expectedRows: -// t.Fail() -// } -// } -// } +type KvalobsTextCase struct { + KvalobsTestCase + table db.TextTable +} + +func TextCase(ktc KvalobsTestCase) KvalobsTextCase { + path := filepath.Join(DUMPS_PATH, ktc.db.Name) + return KvalobsTextCase{ktc, port.TextTable(path)} +} + +func TestImportTextKvalobs(t *testing.T) { + log.SetFlags(log.LstdFlags | log.Lshortfile) + + pool, err := pgxpool.New(context.TODO(), LARD_STRING) + if err != nil { + t.Log("Could not connect to Lard:", err) + } + defer pool.Close() + + _, histkvalobs := db.InitDBs() + + cases := []KvalobsTextCase{ + // TextCase(KvalobsTestCase{db: kvalobs, station: 18700, paramid: 212, permit: 1, expectedRows: 100}), + TextCase(KvalobsTestCase{db: histkvalobs, station: 18700, permit: 1, expectedRows: 182}), + } + + for _, c := range cases { + config, permits := c.mockConfig() + insertedRows, err := port.ImportTable(c.table, permits, pool, config) + + switch { + case err != nil: + t.Fatal(err) + case insertedRows != c.expectedRows: + t.Fail() + } + } +} From af465ce182df21a147ad58cae9da90763f754f0e Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 29 Nov 2024 12:37:30 
+0100 Subject: [PATCH 24/67] Change cfailed type --- ingestion/src/kvkafka.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ingestion/src/kvkafka.rs b/ingestion/src/kvkafka.rs index 0a99eebf..d5573c26 100644 --- a/ingestion/src/kvkafka.rs +++ b/ingestion/src/kvkafka.rs @@ -109,7 +109,7 @@ pub struct Kvdata { #[serde(default, deserialize_with = "optional")] useinfo: Option<String>, #[serde(default, deserialize_with = "optional")] - cfailed: Option<i32>, + cfailed: Option<String>, } // If the field is either empty or missing it should deserialize to None. From cd14110948495b48cc5c308ac3e8b2c0001eb219 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 29 Nov 2024 12:42:42 +0100 Subject: [PATCH 25/67] Log outside before if condition --- migrations/kvalobs/db/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/migrations/kvalobs/db/config_test.go b/migrations/kvalobs/db/config_test.go index a2ec2477..911b290f 100644 --- a/migrations/kvalobs/db/config_test.go +++ b/migrations/kvalobs/db/config_test.go @@ -49,9 +49,9 @@ func TestShouldProcessLabel(t *testing.T) { } for _, c := range cases { + t.Log(c.tag) res := c.config.ShouldProcessLabel(&c.label) if res != c.expected { - t.Log(c.tag) t.Fail() } } From a14ca9ba9cf88e52994cdc242934670e9cf8f1cb Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 29 Nov 2024 14:43:08 +0100 Subject: [PATCH 26/67] Remove unnecessary generic struct and improve logging/bar outputs --- migrations/kdvh/import/cache/main.go | 2 +- migrations/kdvh/import/import.go | 11 ++- migrations/kvalobs/db/base_config.go | 16 ++-- migrations/kvalobs/db/config_test.go | 38 ++++---- migrations/kvalobs/db/labels.go | 22 ++--- migrations/kvalobs/db/table.go | 4 +- migrations/kvalobs/dump/data.go | 24 +++-- migrations/kvalobs/dump/dump.go | 128 +++++++++++++++------------ migrations/kvalobs/dump/main.go | 2 +- migrations/kvalobs/dump/text.go | 12 ++- migrations/kvalobs/import/data.go | 5 +- migrations/kvalobs/import/import.go | 28 +++--- migrations/kvalobs/import/main.go | 3 +- migrations/kvalobs/import/text.go | 5 +- migrations/tests/kvalobs_test.go | 2 +- 15 files changed, 166 insertions(+), 136 deletions(-) diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index 83b5714b..a6d48b00 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -66,7 +66,7 @@ type TsInfo struct { } func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpool.Pool) (*TsInfo, error) { - logstr := fmt.Sprintf("%v - %v - %v: ", table, station, element) + logstr := fmt.Sprintf("[%v - %v - %v]: ", table, station, element) key := newKDVHKey(element, table, station) param, ok := cache.Stinfo[key.Inner] diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index 86d6a97d..aceab4ce 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -33,8 +33,6 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config convFunc := getConvertFunc(table) - bar := utils.NewBar(len(stations), table.TableName) - bar.RenderBlank() for _, station := range stations { stnr, err := getStationNumber(station, config.Stations) if err != nil { @@ -44,15 +42,17 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config continue } - dir := filepath.Join(config.Path, table.Path, station.Name()) - elements, err := os.ReadDir(dir) + stationDir := filepath.Join(config.Path, table.Path, station.Name()) +
elements, err := os.ReadDir(stationDir) if err != nil { slog.Warn(err.Error()) continue } + bar := utils.NewBar(len(elements), stationDir) var wg sync.WaitGroup for _, element := range elements { + bar.Add(1) elemCode, err := getElementCode(element, config.Elements) if err != nil { if config.Verbose { @@ -70,7 +70,7 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config return } - filename := filepath.Join(dir, element.Name()) + filename := filepath.Join(stationDir, element.Name()) data, text, flag, err := parseData(filename, tsInfo, convFunc, table, config) if err != nil { return @@ -98,7 +98,6 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config }() } wg.Wait() - bar.Add(1) } outputStr := fmt.Sprintf("%v: %v total rows inserted", table.TableName, rowsInserted) diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go index 3473eb13..ff24bc18 100644 --- a/migrations/kvalobs/db/base_config.go +++ b/migrations/kvalobs/db/base_config.go @@ -10,7 +10,7 @@ import ( // TODO: it looks like histkvalobs has data only starting from 2023-06-01? var FROMTIME time.Time = time.Date(2006, 01, 01, 00, 00, 00, 00, time.UTC) -type BaseConfig[T int32 | string] struct { +type BaseConfig struct { Path string `arg:"-p" default:"./dumps" help:"Location the dumped data will be stored in"` FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` @@ -19,30 +19,30 @@ type BaseConfig[T int32 | string] struct { Stations []int32 `help:"Optional space separated list of station numbers"` TypeIds []int32 `help:"Optional space separated list of type IDs"` ParamIds []int32 `help:"Optional space separated list of param IDs"` - Sensors []T `help:"Optional space separated list of sensors"` + Sensors []int32 `help:"Optional space separated list of sensors"` Levels []int32 `help:"Optional space separated list of levels"` } -func (config *BaseConfig[T]) ShouldProcessLabel(label *Label[T]) bool { +func (config *BaseConfig) ShouldProcessLabel(label *Label) bool { // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || - return utils.IsEmptyOrContains(config.Stations, label.StationID) && + return utils.IsEmptyOrContains(config.ParamIds, label.ParamID) && + // utils.IsEmptyOrContains(config.Stations, label.StationID) && utils.IsEmptyOrContains(config.TypeIds, label.TypeID) && - utils.IsEmptyOrContains(config.ParamIds, label.ParamID) && // TODO: these two should never be null anyway? 
utils.IsEmptyOrContainsPtr(config.Sensors, label.Sensor) && utils.IsEmptyOrContainsPtr(config.Levels, label.Level) } -func (config *BaseConfig[T]) TimeSpan() *utils.TimeSpan { +func (config *BaseConfig) TimeSpan() *utils.TimeSpan { return &utils.TimeSpan{From: config.FromTime.Inner(), To: config.ToTime.Inner()} } // Check if the `--db` flag was passed in -func (config *BaseConfig[T]) ChosenDB(name string) bool { +func (config *BaseConfig) ChosenDB(name string) bool { return config.Database == "" || config.Database == name } // Check if the `--table` flag was passed in -func (config *BaseConfig[T]) ChosenTable(name string) bool { +func (config *BaseConfig) ChosenTable(name string) bool { return config.Table == "" || config.Table == name } diff --git a/migrations/kvalobs/db/config_test.go b/migrations/kvalobs/db/config_test.go index 911b290f..555a7447 100644 --- a/migrations/kvalobs/db/config_test.go +++ b/migrations/kvalobs/db/config_test.go @@ -5,45 +5,45 @@ import ( ) func TestShouldProcessLabel(t *testing.T) { - type TestCase[T string] struct { + type TestCase struct { tag string - label Label[T] - config BaseConfig[T] + label Label + config BaseConfig expected bool } - cases := []TestCase[string]{ + cases := []TestCase{ { tag: "empty config", - label: Label[string]{StationID: 18700}, - config: BaseConfig[string]{}, + label: Label{ParamID: 212}, + config: BaseConfig{}, expected: true, }, { - tag: "station specified", - label: Label[string]{StationID: 18700}, - config: BaseConfig[string]{Stations: []int32{18700}}, + tag: "label paramid in config paramids", + label: Label{ParamID: 212}, + config: BaseConfig{ParamIds: []int32{212}}, expected: true, }, { - tag: "station not in label", - label: Label[string]{StationID: 18700}, - config: BaseConfig[string]{Stations: []int32{20000}}, + tag: "label paramid NOT in config paramids", + label: Label{ParamID: 212}, + config: BaseConfig{ParamIds: []int32{300}}, expected: false, }, { - tag: "label without level", - label: Label[string]{}, - config: BaseConfig[string]{Levels: []int32{2}}, + tag: "label level NOT in config level", + label: Label{}, + config: BaseConfig{Levels: []int32{2}}, expected: false, }, { - tag: "valid level", - label: func() Label[string] { + tag: "label level in config levels", + label: func() Label { var level int32 = 2 - return Label[string]{Level: &level} + return Label{Level: &level} }(), - config: BaseConfig[string]{Levels: []int32{2}}, + config: BaseConfig{Levels: []int32{2}}, expected: true, }, } diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go index 814dc7f2..d4d2be06 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -9,22 +9,16 @@ import ( ) // Kvalobs specific label -type Label[T int32 | string] struct { +type Label struct { StationID int32 ParamID int32 TypeID int32 // These two are not present in the `text_data` tabl - Sensor *T // bpchar(1) in `data` table + Sensor *int32 // bpchar(1) in `data` table Level *int32 } -// Can be directly casted to lard.Label -type LardLabel = Label[int32] - -// Kvalobs specific -type KvLabel = Label[string] - -func (l *Label[T]) sensorLevelString() (string, string) { +func (l *Label) sensorLevelString() (string, string) { var sensor, level string if l.Sensor != nil { sensor = fmt.Sprint(*l.Sensor) @@ -35,15 +29,15 @@ func (l *Label[T]) sensorLevelString() (string, string) { return sensor, level } -func (l *Label[T]) ToFilename() string { +func (l *Label) ToFilename() string { sensor, level := l.sensorLevelString() return 
fmt.Sprintf("%v_%v_%v_%v_%v.csv", l.StationID, l.ParamID, l.ParamID, sensor, level) } -func (l *Label[T]) LogStr() string { +func (l *Label) LogStr() string { sensor, level := l.sensorLevelString() return fmt.Sprintf( - "(%v - %v - %v - %v - %v): ", + "[%v - %v - %v - %v - %v]: ", l.StationID, l.ParamID, l.TypeID, sensor, level, ) } @@ -61,7 +55,7 @@ func parseFilenameFields(s *string) (*int32, error) { } // Deserialize filename to LardLabel -func LabelFromFilename(filename string) (*LardLabel, error) { +func LabelFromFilename(filename string) (*Label, error) { name := strings.TrimSuffix(filename, ".csv") fields := strings.Split(name, "_") @@ -79,7 +73,7 @@ func LabelFromFilename(filename string) (*LardLabel, error) { return nil, err } - return &LardLabel{ + return &Label{ StationID: *converted[0], ParamID: *converted[1], TypeID: *converted[2], diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go index f6bdbfcb..f31a9eaa 100644 --- a/migrations/kvalobs/db/table.go +++ b/migrations/kvalobs/db/table.go @@ -20,10 +20,10 @@ type DataTable = Table[DataSeries] type TextTable = Table[TextSeries] // Function used to query labels from kvalobs given an optional timespan -type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*KvLabel, error) +type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) // Function used to query timeseries from kvalobs for a specific label -type ObsDumpFunc[S DataSeries | TextSeries] func(label *KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (S, error) +type ObsDumpFunc[S DataSeries | TextSeries] func(label *Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (S, error) // Lard Import function type ImportFunc func(ts [][]any, pool *pgxpool.Pool, logStr string) (int64, error) diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index 240ba9cc..b8513249 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -2,6 +2,7 @@ package dump import ( "context" + "fmt" "log/slog" "path/filepath" @@ -21,7 +22,7 @@ func DataTable(path string) db.DataTable { } } -func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { +func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, error) { // TODO: not sure about the sensor/level conditions, // they should never be NULL since they have default values different from NULL? 
// TODO: We probably don't even need the join, @@ -40,20 +41,22 @@ func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel slog.Info("Querying data labels...") rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) if err != nil { + slog.Error(err.Error()) return nil, err } slog.Info("Collecting data labels...") - labels := make([]*db.KvLabel, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.KvLabel]) + labels := make([]*db.Label, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label]) if err != nil { + slog.Error(err.Error()) return nil, err } return labels, nil } -func dumpDataSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { +func dumpDataSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { // TODO: is the case useful here, we can just check for cfailed = '' in here // query := `SELECT // obstime, @@ -79,6 +82,8 @@ func dumpDataSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.P // ORDER BY // stationid, // obstime` + + // NOTE: sensor and level could be NULL, but in reality they have default values query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed FROM data WHERE stationid = $1 @@ -90,23 +95,32 @@ func dumpDataSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.P AND ($7::timestamp IS NULL OR obstime < $7) ORDER BY obstime` + // Convert to string because `sensor` in Kvalobs is a BPCHAR(1) + var sensor *string + if label.Sensor != nil { + sensorval := fmt.Sprint(*label.Sensor) + sensor = &sensorval + } + rows, err := pool.Query( context.TODO(), query, label.StationID, label.TypeID, label.ParamID, - label.Sensor, + sensor, label.Level, timespan.From, timespan.To, ) if err != nil { + slog.Error(err.Error()) return nil, err } data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.DataObs]) if err != nil { + slog.Error(err.Error()) return nil, err } diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index c90d4e29..26f942bf 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -16,37 +16,42 @@ import ( "migrate/utils" ) -func readLabelCSV(filename string) (labels []*db.KvLabel, err error) { +func readLabelCSV(filename string) (labels []*db.Label, err error) { file, err := os.Open(filename) if err != nil { + slog.Error(err.Error()) return nil, err } defer file.Close() + slog.Info("Reading previously dumped labels...") err = gocsv.Unmarshal(file, &labels) + if err != nil { + slog.Error(err.Error()) + } return labels, err } -func writeLabelCSV(path string, labels []*db.KvLabel) error { +func writeLabelCSV(path string, labels []*db.Label) error { file, err := os.Create(path) if err != nil { + slog.Error(err.Error()) return err } slog.Info("Writing timeseries labels to " + path) - // Write number of lines as header - // file.Write([]byte(fmt.Sprintf("%v\n", len(labels)))) - if err = gocsv.Marshal(labels, file); err != nil { - return err + err = gocsv.Marshal(labels, file) + if err != nil { + slog.Error(err.Error()) } - - return nil + return err } -func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, label *db.KvLabel) error { +func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, label *db.Label) error { filename := filepath.Join(path, label.ToFilename()) file, 
err := os.Create(filename) if err != nil { + slog.Error(err.Error()) return err } @@ -60,7 +65,7 @@ func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, labe return nil } -func getLabels[T db.DataSeries | db.TextSeries](table db.Table[T], pool *pgxpool.Pool, config *Config) (labels []*db.KvLabel, err error) { +func getLabels[T db.DataSeries | db.TextSeries](table db.Table[T], pool *pgxpool.Pool, config *Config) (labels []*db.Label, err error) { labelFile := table.Path + "_labels.csv" if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { @@ -68,77 +73,83 @@ func getLabels[T db.DataSeries | db.TextSeries](table db.Table[T], pool *pgxpool if err != nil { return nil, err } + return labels, writeLabelCSV(labelFile, labels) + } + return readLabelCSV(labelFile) +} - err = writeLabelCSV(labelFile, labels) - return labels, err +func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { + labelmap := make(map[int32][]*db.Label) + + var station int32 + for _, label := range labels { + if station != label.StationID { + station = label.StationID + } + labelmap[station] = append(labelmap[station], label) } - return readLabelCSV(labelFile) + return labelmap } func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { - var labels []*db.KvLabel - labels, err := getLabels(table, pool, config) if err != nil { - slog.Error(err.Error()) return } + stationMap := getStationLabelMap(labels) timespan := config.TimeSpan() - utils.SetLogFile(table.Path, "dump") - // TODO: this bar is a bit deceiving if you don't dump all the labels - // Maybe should only cache the ones requested from cli? - bar := utils.NewBar(len(labels), table.Path) + utils.SetLogFile(table.Path, "dump") + defer log.SetOutput(os.Stdout) // Used to limit connections to the database semaphore := make(chan struct{}, config.MaxConn) var wg sync.WaitGroup - var stationPath string - for _, label := range labels { - bar.Add(1) + for station, labels := range stationMap { + stationPath := filepath.Join(table.Path, fmt.Sprint(station)) - if !config.ShouldProcessLabel(label) { + if !utils.IsEmptyOrContains(config.Stations, station) { continue } - thisPath := filepath.Join(table.Path, fmt.Sprint(label.StationID)) - if thisPath != stationPath { - stationPath = thisPath - if err := os.MkdirAll(stationPath, os.ModePerm); err != nil { - slog.Error(err.Error()) - continue - } - } + // TODO: this bar is a bit deceiving if you don't dump all the labels + // Maybe should only cache the ones requested from cli? + bar := utils.NewBar(len(labels), stationPath) - wg.Add(1) - semaphore <- struct{}{} - go func() { - defer func() { - wg.Done() - // Release semaphore - <-semaphore - }() + for _, label := range labels { + // TODO: only add to the bar if the label is processed? 
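// Note on the label loop below: concurrency is bounded with a counting semaphore.
// `semaphore` is a buffered channel of capacity `config.MaxConn`; the loop sends an
// empty struct before spawning each goroutine and the goroutine receives it back in
// its deferred cleanup alongside wg.Done(), so at most MaxConn dump queries run
// against Kvalobs at any one time, and wg.Wait() blocks until every label of the
// current station has been handled.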
+ bar.Add(1) - series, err := table.DumpSeries(label, timespan, pool) - if err != nil { - slog.Error(err.Error()) - return - } - - if err := writeSeriesCSV(series, stationPath, label); err != nil { - slog.Error(err.Error()) - return + if !config.ShouldProcessLabel(label) { + continue } - slog.Info(label.LogStr() + "dumped successfully") - }() + wg.Add(1) + semaphore <- struct{}{} + go func() { + defer func() { + wg.Done() + // Release semaphore + <-semaphore + }() + + series, err := table.DumpSeries(label, timespan, pool) + if err != nil { + return + } + + if err := writeSeriesCSV(series, stationPath, label); err != nil { + return + } + + slog.Info(label.LogStr() + "dumped successfully") + }() + } + wg.Wait() } - wg.Wait() - - log.SetOutput(os.Stdout) } func dumpDB(database db.DB, config *Config) { @@ -156,10 +167,15 @@ func dumpDB(database db.DB, config *Config) { } if config.ChosenTable(db.DATA_TABLE_NAME) { - dumpTable(DataTable(path), pool, config) + table := DataTable(path) + + dumpTable(table, pool, config) } if config.ChosenTable(db.TEXT_TABLE_NAME) { - dumpTable(TextTable(path), pool, config) + table := TextTable(path) + utils.SetLogFile(table.Path, "dump") + + dumpTable(table, pool, config) } } diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 794af68a..c631de65 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -16,7 +16,7 @@ import ( // func joinTS(first, second []lard.Label) type Config struct { - db.BaseConfig[string] + db.BaseConfig UpdateLabels bool `help:"Overwrites the label CSV files"` MaxConn int `arg:"-n" default:"4" help:"Max number of allowed concurrent connections to Kvalobs"` } diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 74337c1d..21c00c75 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -21,7 +21,7 @@ func TextTable(path string) db.TextTable { } } -func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel, error) { +func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, error) { // OGquery := `SELECT DISTINCT // stationid, // typeid, @@ -64,20 +64,22 @@ func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.KvLabel slog.Info("Querying text labels...") rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) if err != nil { + slog.Error(err.Error()) return nil, err } slog.Info("Collecting text labels...") - labels := make([]*db.KvLabel, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.KvLabel]) + labels := make([]*db.Label, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label]) if err != nil { + slog.Error(err.Error()) return nil, err } return labels, nil } -func dumpTextSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { +func dumpTextSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { // query := ` // SELECT // obstime, @@ -113,11 +115,13 @@ func dumpTextSeries(label *db.KvLabel, timespan *utils.TimeSpan, pool *pgxpool.P timespan.To, ) if err != nil { + slog.Error(err.Error()) return nil, err } data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.TextObs]) if err != nil { + slog.Error(err.Error()) return nil, err } diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go index 
1f603fc9..64c1faff 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/import/data.go @@ -3,13 +3,14 @@ package port import ( "bufio" "log/slog" - "migrate/kvalobs/db" - "migrate/lard" "os" "path/filepath" "strconv" "strings" "time" + + "migrate/kvalobs/db" + "migrate/lard" ) func readDataFiles() []lard.Label { diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index ff80582c..12e7ca75 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -4,15 +4,17 @@ import ( "fmt" "log" "log/slog" - "migrate/kvalobs/db" - "migrate/lard" - "migrate/utils" "os" "path/filepath" + "strconv" "sync" "time" "github.com/jackc/pgx/v5/pgxpool" + + "migrate/kvalobs/db" + "migrate/lard" + "migrate/utils" ) func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *lard.PermitMaps, pool *pgxpool.Pool, config *Config) (int64, error) { @@ -21,10 +23,14 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la slog.Error(err.Error()) return 0, err } - fmt.Println(stations) var rowsInserted int64 for _, station := range stations { + stnr, err := strconv.ParseInt(station.Name(), 10, 32) + if err != nil || !utils.IsEmptyOrContains(config.Stations, int32(stnr)) { + continue + } + stationDir := filepath.Join(table.Path, station.Name()) labels, err := os.ReadDir(stationDir) if err != nil { @@ -32,12 +38,10 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la continue } + bar := utils.NewBar(len(labels), stationDir) var wg sync.WaitGroup - - var stationRows int64 - - bar := utils.NewBar(len(labels), station.Name()) for _, file := range labels { + // TODO: only add if label was processed? bar.Add(1) label, err := db.LabelFromFilename(file.Name()) @@ -64,8 +68,8 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la defer wg.Done() lardLabel := lard.Label(*label) - // TODO: figure out if we should (0, 0) sensor level pair to (NULL, NULL) - // TODO: figure where to get fromtime, kvalobs directly? Stinfosys? + // TODO: figure out if we should convert (0, 0) to (NULL, NULL) for sensor, level + // TODO: figure out where to get fromtime, kvalobs directly? Stinfosys? 
tsid, err := lard.GetTimeseriesID(&lardLabel, time.Now(), pool) if err != nil { slog.Error(err.Error()) @@ -88,12 +92,10 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la slog.Error(labelStr + "failed flag bulk insertion - " + err.Error()) } - stationRows += count + rowsInserted += count }() } wg.Wait() - rowsInserted += stationRows - slog.Info(fmt.Sprintf("Station %v: %v rows inserted", station.Name(), stationRows)) } outputStr := fmt.Sprintf("%v: %v total rows inserted", table.Path, rowsInserted) diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index 2debd471..0f552edb 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -2,7 +2,6 @@ package port import ( "context" - "fmt" "log/slog" "os" @@ -14,7 +13,7 @@ import ( ) type Config struct { - db.BaseConfig[int32] + db.BaseConfig } func (config *Config) Execute() error { diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go index 941e0e47..f3db8f99 100644 --- a/migrations/kvalobs/import/text.go +++ b/migrations/kvalobs/import/text.go @@ -3,13 +3,14 @@ package port import ( "bufio" "log/slog" - "migrate/kvalobs/db" - "migrate/lard" "os" "path/filepath" "strconv" "strings" "time" + + "migrate/kvalobs/db" + "migrate/lard" ) // Returns a TextTable for import diff --git a/migrations/tests/kvalobs_test.go b/migrations/tests/kvalobs_test.go index 1d3cf3aa..77d635f8 100644 --- a/migrations/tests/kvalobs_test.go +++ b/migrations/tests/kvalobs_test.go @@ -29,7 +29,7 @@ type KvalobsTestCase struct { func (t *KvalobsTestCase) mockConfig() (*port.Config, *lard.PermitMaps) { return &port.Config{ - BaseConfig: db.BaseConfig[int32]{ + BaseConfig: db.BaseConfig{ Stations: []int32{t.station}, }, }, &lard.PermitMaps{ From eca1ec1a3d9ce75fd9cb9a323844cb7cc99609f6 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 29 Nov 2024 14:51:18 +0100 Subject: [PATCH 27/67] Update metadata caching --- migrations/kdvh/import/cache/main.go | 41 ++------ migrations/kdvh/import/cache/permissions.go | 104 -------------------- migrations/kdvh/import/cache/stinfosys.go | 13 ++- migrations/lard/permissions.go | 3 +- migrations/tests/kdvh_test.go | 7 +- 5 files changed, 28 insertions(+), 140 deletions(-) delete mode 100644 migrations/kdvh/import/cache/permissions.go diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index a6d48b00..d4282e71 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -1,14 +1,10 @@ package cache import ( - "context" "errors" "fmt" "log/slog" - "os" - "time" - "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" "github.com/rickb777/period" @@ -17,39 +13,20 @@ import ( ) type Cache struct { - Offsets OffsetMap - Stinfo StinfoMap - KDVH KDVHMap - ParamPermits ParamPermitMap - StationPermits StationPermitMap + Offsets OffsetMap + Stinfo StinfoMap + KDVH KDVHMap + Permits *lard.PermitMaps } // Caches all the metadata needed for import of KDVH tables. // If any error occurs inside here the program will exit. func CacheMetadata(tables, stations, elements []string, kdvh *db.KDVH) *Cache { - slog.Info("Connecting to Stinfosys to cache metadata") - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - conn, err := pgx.Connect(ctx, os.Getenv(db.STINFO_ENV_VAR)) - if err != nil { - slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) - os.Exit(1) - } - - stinfoMeta := cacheStinfoMeta(tables, elements, kdvh, conn) - // TODO: use the one in migrate/lard instead! - stationPermits := cacheStationPermits(conn) - paramPermits := cacheParamPermits(conn) - - conn.Close(context.TODO()) - return &Cache{ - Stinfo: stinfoMeta, - StationPermits: stationPermits, - ParamPermits: paramPermits, - Offsets: cacheParamOffsets(), - KDVH: cacheKDVH(tables, stations, elements, kdvh), + Stinfo: cacheStinfoMeta(tables, elements, kdvh), + Permits: lard.NewPermitTables(), + Offsets: cacheParamOffsets(), + KDVH: cacheKDVH(tables, stations, elements, kdvh), } } @@ -77,7 +54,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo } // Check if data for this station/element is restricted - isOpen := cache.timeseriesIsOpen(station, param.TypeID, param.ParamID) + isOpen := cache.Permits.TimeseriesIsOpen(station, param.TypeID, param.ParamID) // TODO: eventually use this to choose which table to use on insert if !isOpen { diff --git a/migrations/kdvh/import/cache/permissions.go b/migrations/kdvh/import/cache/permissions.go deleted file mode 100644 index a820226c..00000000 --- a/migrations/kdvh/import/cache/permissions.go +++ /dev/null @@ -1,104 +0,0 @@ -package cache - -import ( - "context" - "log/slog" - "os" - - "github.com/jackc/pgx/v5" -) - -type StationId = int32 -type PermitId = int32 - -type ParamPermitMap map[StationId][]ParamPermit -type StationPermitMap map[StationId]PermitId - -type ParamPermit struct { - TypeId int32 - ParamdId int32 - PermitId int32 -} - -func cacheParamPermits(conn *pgx.Conn) ParamPermitMap { - cache := make(ParamPermitMap) - - rows, err := conn.Query( - context.TODO(), - "SELECT stationid, message_formatid, paramid, permitid FROM v_station_param_policy", - ) - if err != nil { - slog.Error(err.Error()) - os.Exit(1) - } - - for rows.Next() { - var stnr StationId - var permit ParamPermit - - if err := rows.Scan(&stnr, &permit.TypeId, &permit.ParamdId, &permit.PermitId); err != nil { - slog.Error(err.Error()) - os.Exit(1) - } - - cache[stnr] = append(cache[stnr], permit) - } - - if rows.Err() != nil { - slog.Error(rows.Err().Error()) - os.Exit(1) - } - - return cache -} - -func cacheStationPermits(conn *pgx.Conn) StationPermitMap { - cache := make(StationPermitMap) - - rows, err := conn.Query( - context.TODO(), - "SELECT stationid, permitid FROM station_policy", - ) - if err != nil { - slog.Error(err.Error()) - os.Exit(1) - } - - for rows.Next() { - var stnr StationId - var permit PermitId - - if err := rows.Scan(&stnr, &permit); err != nil { - slog.Error(err.Error()) - os.Exit(1) - } - - cache[stnr] = permit - } - - if rows.Err() != nil { - slog.Error(rows.Err().Error()) - os.Exit(1) - } - - return cache -} - -func (c *Cache) timeseriesIsOpen(stnr, typeid, paramid int32) bool { - // First check param permit table - if permits, ok := c.ParamPermits[stnr]; ok { - for _, permit := range permits { - if (permit.TypeId == 0 || permit.TypeId == typeid) && - (permit.ParamdId == 0 || permit.ParamdId == paramid) { - return permit.PermitId == 1 - } - } - } - - // Otherwise check station permit table - if permit, ok := c.StationPermits[stnr]; ok { - return permit == 1 - } - - return false -} diff --git a/migrations/kdvh/import/cache/stinfosys.go b/migrations/kdvh/import/cache/stinfosys.go index c6af589f..ed8b2f27 100644 --- a/migrations/kdvh/import/cache/stinfosys.go +++ b/migrations/kdvh/import/cache/stinfosys.go @@ -32,9 +32,20 @@ type StinfoParam struct { } // Save metadata for later use 
by quering Stinfosys -func cacheStinfoMeta(tables, elements []string, kdvh *db.KDVH, conn *pgx.Conn) StinfoMap { +func cacheStinfoMeta(tables, elements []string, kdvh *db.KDVH) StinfoMap { cache := make(StinfoMap) + slog.Info("Connecting to Stinfosys to cache metadata") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + conn, err := pgx.Connect(ctx, os.Getenv(db.STINFO_ENV_VAR)) + if err != nil { + slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. " + err.Error()) + os.Exit(1) + } + defer conn.Close(ctx) + for _, table := range kdvh.Tables { if len(tables) > 0 && !slices.Contains(tables, table.TableName) { continue diff --git a/migrations/lard/permissions.go b/migrations/lard/permissions.go index 181a20f1..ca99d58f 100644 --- a/migrations/lard/permissions.go +++ b/migrations/lard/permissions.go @@ -29,7 +29,7 @@ type PermitMaps struct { } func NewPermitTables() *PermitMaps { - slog.Info("Connecting to Stinfosys to cache metadata") + slog.Info("Connecting to Stinfosys to cache permits") ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -38,6 +38,7 @@ func NewPermitTables() *PermitMaps { slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. " + err.Error()) os.Exit(1) } + defer conn.Close(ctx) return &PermitMaps{ ParamPermits: cacheParamPermits(conn), diff --git a/migrations/tests/kdvh_test.go b/migrations/tests/kdvh_test.go index 16247b26..518840a0 100644 --- a/migrations/tests/kdvh_test.go +++ b/migrations/tests/kdvh_test.go @@ -12,6 +12,7 @@ import ( "migrate/kdvh/db" port "migrate/kdvh/import" "migrate/kdvh/import/cache" + "migrate/lard" ) type KdvhTestCase struct { @@ -38,8 +39,10 @@ func (t *KdvhTestCase) mockConfig() (*port.Config, *cache.Cache) { IsScalar: true, }, }, - StationPermits: cache.StationPermitMap{ - t.station: t.permit, + Permits: &lard.PermitMaps{ + StationPermits: lard.StationPermitMap{ + t.station: t.permit, + }, }, } } From 70d694185abc02ef87f40e2551b7ff9c2fecf7e5 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 09:13:50 +0100 Subject: [PATCH 28/67] Check if sensor and level have default values in KDVH and Kvalobs --- migrations/lard/timeseries.go | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index 490d3c02..78cba036 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -16,6 +16,13 @@ type Label struct { Level *int32 } +func (l *Label) sensorLevelAreBothZero() bool { + if l.Sensor == nil || l.Level == nil { + return false + } + return *l.Level == 0 && *l.Sensor == 0 +} + func GetTimeseriesID(label *Label, fromtime time.Time, pool *pgxpool.Pool) (tsid int32, err error) { // Query LARD labels table err = pool.QueryRow( @@ -33,7 +40,28 @@ func GetTimeseriesID(label *Label, fromtime time.Time, pool *pgxpool.Pool) (tsid return tsid, nil } - // Otherwise insert new timeseries + // In KDVH and Kvalobs sensor and level have default values, while in LARD they are NULL + // if Obsinn does not specify them. Therefore we need to check if sensor and level are NULL + // when they are both zero. 
+ // FIXME(?): in some cases, level and sensor are marked with (0,0) in Obsinn, + // so there might be problems if a timeseries is not present in LARD at the time of importing + if label.sensorLevelAreBothZero() { + err := pool.QueryRow( + context.TODO(), + `SELECT timeseries FROM labels.met + WHERE station_id = $1 + AND param_id = $2 + AND type_id = $3 + AND lvl IS NULL + AND sensor IS NULL`, + label.StationID, label.ParamID, label.TypeID).Scan(&tsid) + + if err == nil { + return tsid, nil + } + } + + // If none of the above worked insert a new timeseries transaction, err := pool.Begin(context.TODO()) if err != nil { return tsid, err From dabd954a3fac5507fb86943452279a72cd750917 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 09:39:32 +0100 Subject: [PATCH 29/67] Move whole loop body inside goroutine --- migrations/kvalobs/dump/dump.go | 13 +++++---- migrations/kvalobs/import/import.go | 42 ++++++++++++++--------------- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 26f942bf..ad4fda2d 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -120,22 +120,21 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool bar := utils.NewBar(len(labels), stationPath) for _, label := range labels { - // TODO: only add to the bar if the label is processed? - bar.Add(1) - - if !config.ShouldProcessLabel(label) { - continue - } - wg.Add(1) semaphore <- struct{}{} + go func() { defer func() { wg.Done() + bar.Add(1) // Release semaphore <-semaphore }() + if !config.ShouldProcessLabel(label) { + return + } + series, err := table.DumpSeries(label, timespan, pool) if err != nil { return diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 12e7ca75..d8b3df7f 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -41,31 +41,31 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la bar := utils.NewBar(len(labels), stationDir) var wg sync.WaitGroup for _, file := range labels { - // TODO: only add if label was processed? 
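// In the restructured loop each file is handled entirely inside its own goroutine:
// wg.Done() and bar.Add(1) sit in a deferred function, so the progress bar still
// advances when a label is skipped by ShouldProcessLabel or by the permit check.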
- bar.Add(1) - - label, err := db.LabelFromFilename(file.Name()) - if err != nil { - slog.Error(err.Error()) - continue - } + wg.Add(1) + go func() { + defer func() { + wg.Done() + bar.Add(1) + }() - if !config.ShouldProcessLabel(label) { - continue - } + label, err := db.LabelFromFilename(file.Name()) + if err != nil { + slog.Error(err.Error()) + return + } - labelStr := label.LogStr() + if !config.ShouldProcessLabel(label) { + return + } - // Check if data for this station/element is restricted - if !permits.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) { - // TODO: eventually use this to choose which table to use on insert - slog.Warn(labelStr + "timeseries data is restricted, skipping") - continue - } + labelStr := label.LogStr() - wg.Add(1) - go func() { - defer wg.Done() + // Check if data for this station/element is restricted + if !permits.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) { + // TODO: eventually use this to choose which table to use on insert + slog.Warn(labelStr + "timeseries data is restricted, skipping") + return + } lardLabel := lard.Label(*label) // TODO: figure out if we should convert (0, 0) to (NULL, NULL) for sensor, level From ad80e2c4ac9632e7d0f8678b97f86425f40391db Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 09:48:18 +0100 Subject: [PATCH 30/67] Remove comments --- migrations/kvalobs/dump/data.go | 49 ++++-------------------- migrations/kvalobs/dump/text.go | 59 +++-------------------------- migrations/kvalobs/import/data.go | 5 --- migrations/kvalobs/import/import.go | 3 +- 4 files changed, 13 insertions(+), 103 deletions(-) diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index b8513249..c723e8ef 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -23,20 +23,11 @@ func DataTable(path string) db.DataTable { } func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, error) { - // TODO: not sure about the sensor/level conditions, - // they should never be NULL since they have default values different from NULL? - // TODO: We probably don't even need the join, - // because `name` (`param_code`) is not present in our `labels.met`? 
- // query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level, name FROM data - // LEFT JOIN param USING (paramid) - // WHERE name IS NOT NUL - // AND sensor IS NOT NULL - // AND level IS NOT NULL - // AND ($1::timestamp IS NULL OR obstime >= $1) - // AND ($2::timestamp IS NULL OR obstime < $2)` - query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data - WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2) - ORDER BY stationid` + query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level + FROM data + WHERE ($1::timestamp IS NULL OR obstime >= $1) + AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid` slog.Info("Querying data labels...") rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) @@ -57,32 +48,6 @@ func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, } func dumpDataSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { - // TODO: is the case useful here, we can just check for cfailed = '' in here - // query := `SELECT - // obstime, - // original, - // tbtime, - // CASE - // WHEN original = corrected AND cfailed = '' THEN NULL - // ELSE corrected - // END, - // controlinfo, - // useinfo, - // cfailed - // FROM - // data - // WHERE - // stationid = $1 - // AND typeid = $2 - // AND paramid = $3 - // AND sensor = $4 - // AND level = $5 - // AND obstime >= $6 - // TODO: should we keep these? Maybe obstime is actually useful - // ORDER BY - // stationid, - // obstime` - // NOTE: sensor and level could be NULL, but in reality they have default values query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed FROM data @@ -91,8 +56,8 @@ func dumpDataSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Poo AND paramid = $3 AND sensor = $4 AND level = $5 - AND ($6::timestamp IS NULL OR obstime >= $6) - AND ($7::timestamp IS NULL OR obstime < $7) + AND ($6::timestamp IS NULL OR obstime >= $6) + AND ($7::timestamp IS NULL OR obstime < $7) ORDER BY obstime` // Convert to string because `sensor` in Kvalobs is a BPCHAR(1) diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 21c00c75..0e3403d8 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -22,44 +22,12 @@ func TextTable(path string) db.TextTable { } func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, error) { - // OGquery := `SELECT DISTINCT - // stationid, - // typeid, - // paramid, - // 0 AS sensor, - // 0 AS level, - // name AS code - // FROM - // text_data - // LEFT JOIN - // param USING (paramid) - // WHERE - // obstime >= $1 - // TODO: probably don't need this? - // AND obstime <= $2 - // AND name IS NOT NULL - // TODO: do we need this order by? As far as I can see, - // it's used to compare text_data and scalar_data timeseries - // ORDER BY - // stationid, - // typeid, - // paramid, - // level, - // sensor` - // NOTE: `param` table is empty in histkvalobs - // TODO: We probably don't even need the join, - // because `name` (`param_code`) is not present in our `labels.met`? 
- // query := `SELECT DISTINCT stationid, typeid, paramid, name FROM text_data - // LEFT JOIN param USING (paramid) - // WHERE name IS NOT NULL - // AND ($1::timestamp IS NULL OR obstime >= $1) - // AND ($2::timestamp IS NULL OR obstime < $2)` - // - // TODO: should sensor/level be NULL or 0 - query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level FROM text_data - WHERE ($1::timestamp IS NULL OR obstime >= $1) AND ($2::timestamp IS NULL OR obstime < $2) - ORDER BY stationid` + query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level + FROM text_data + WHERE ($1::timestamp IS NULL OR obstime >= $1) + AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid` slog.Info("Querying text labels...") rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) @@ -80,23 +48,6 @@ func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, } func dumpTextSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { - // query := ` - // SELECT - // obstime, - // original AS originaltext, - // tbtime - // FROM - // text_data - // WHERE - // stationid = $1 - // AND typeid = $2 - // AND paramid = $3 - // AND obstime >= $4 - // AND obstime <= $5 - // TODO: should we keep these? Maybe obstime is actually useful - // ORDER BY - // stationid, - // obstime` query := `SELECT obstime, original, tbtime FROM text_data WHERE stationid = $1 AND typeid = $2 diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go index 64c1faff..183ba140 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/import/data.go @@ -13,11 +13,6 @@ import ( "migrate/lard" ) -func readDataFiles() []lard.Label { - // TODO: - return nil -} - // Returns a DataTable for import func DataTable(path string) db.DataTable { return db.DataTable{ diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index d8b3df7f..c2464ab0 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -68,7 +68,6 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la } lardLabel := lard.Label(*label) - // TODO: figure out if we should convert (0, 0) to (NULL, NULL) for sensor, level // TODO: figure out where to get fromtime, kvalobs directly? Stinfosys? 
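// `lard.Label(*label)` above is a plain Go struct conversion: it compiles only
// because the Kvalobs label and lard.Label declare identical fields (same names
// and types), and it avoids copying the fields by hand. time.Now() is passed to
// GetTimeseriesID as a placeholder fromtime until a real value is sourced (see
// the TODO above).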
tsid, err := lard.GetTimeseriesID(&lardLabel, time.Now(), pool) if err != nil { @@ -106,7 +105,7 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la return rowsInserted, nil } -// TODO: here we trust that kvalobs and stinfosys have the same +// TODO: while importing we trust that kvalobs and stinfosys have the same // non scalar parameters, which might not be the case func ImportDB(database db.DB, permits *lard.PermitMaps, pool *pgxpool.Pool, config *Config) { path := filepath.Join(config.Path, database.Name) From 0b94eaa91884c732ce0613ea1d9fa7eaa060be03 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 12:19:28 +0100 Subject: [PATCH 31/67] Get fromtime and totime from the station_metadata table --- migrations/kdvh/import/cache/kdvh.go | 17 ++- migrations/kdvh/import/cache/main.go | 16 +-- migrations/kdvh/import/import.go | 4 +- migrations/kvalobs/dump/dump.go | 11 +- migrations/kvalobs/import/cache/main.go | 108 ++++++++++++++++++ migrations/kvalobs/import/import.go | 32 ++++-- migrations/kvalobs/import/main.go | 10 +- migrations/lard/permissions.go | 4 +- migrations/lard/timeseries.go | 14 ++- .../text/18700/18700_1000_316__.csv | 0 migrations/tests/kdvh_test.go | 4 +- migrations/tests/kvalobs_test.go | 34 +++--- 12 files changed, 192 insertions(+), 62 deletions(-) create mode 100644 migrations/kvalobs/import/cache/main.go rename migrations/tests/files/{histkvalobs => kvalobs}/text/18700/18700_1000_316__.csv (100%) diff --git a/migrations/kdvh/import/cache/kdvh.go b/migrations/kdvh/import/cache/kdvh.go index 7b896755..7bc8b8b9 100644 --- a/migrations/kdvh/import/cache/kdvh.go +++ b/migrations/kdvh/import/cache/kdvh.go @@ -11,10 +11,11 @@ import ( "github.com/jackc/pgx/v5" "migrate/kdvh/db" + "migrate/utils" ) // Map of `from_time` and `to_time` for each (table, station, element) triplet. Not present for all parameters -type KDVHMap = map[KDVHKey]Timespan +type KDVHMap = map[KDVHKey]utils.TimeSpan // Used for lookup of fromtime and totime from KDVH type KDVHKey struct { @@ -26,12 +27,7 @@ func newKDVHKey(elem, table string, stnr int32) KDVHKey { return KDVHKey{StinfoKey{ElemCode: elem, TableName: table}, stnr} } -// Timespan stored in KDVH for a given (table, station, element) triplet -type Timespan struct { - FromTime *time.Time `db:"fdato"` - ToTime *time.Time `db:"tdato"` -} - +// Cache timeseries timespan from KDVH func cacheKDVH(tables, stations, elements []string, kdvh *db.KDVH) KDVHMap { cache := make(KDVHMap) @@ -67,13 +63,14 @@ func cacheKDVH(tables, stations, elements []string, kdvh *db.KDVH) KDVHMap { for rows.Next() { var key KDVHKey - var span Timespan + var span utils.TimeSpan + err := rows.Scan( &key.Inner.TableName, &key.Station, &key.Inner.ElemCode, - &span.FromTime, - &span.ToTime, + &span.From, + &span.To, ) if err != nil { diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index d4282e71..e0e35341 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -10,13 +10,14 @@ import ( "migrate/kdvh/db" "migrate/lard" + "migrate/utils" ) type Cache struct { Offsets OffsetMap Stinfo StinfoMap KDVH KDVHMap - Permits *lard.PermitMaps + Permits lard.PermitMaps } // Caches all the metadata needed for import of KDVH tables. 
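// Illustrative sketch, an assumption rather than part of these patches (the utils
// package is not shown here): the TimeSpan shape implied by KDVHMap above and by
// TsInfo.Span in the next hunk. Pointer fields let an absent fromtime/totime stay
// NULL; Contains is a hypothetical helper that only restates the bound checks
// parseData performs on Span.From and Span.To.
package utils

import "time"

// TimeSpan bounds a timeseries; a nil pointer means the bound is open-ended.
type TimeSpan struct {
    From *time.Time
    To   *time.Time
}

// Contains reports whether t lies within the span, treating nil bounds as open.
func (s TimeSpan) Contains(t time.Time) bool {
    if s.From != nil && t.Before(*s.From) {
        return false
    }
    if s.To != nil && t.After(*s.To) {
        return false
    }
    return true
}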
@@ -37,7 +38,7 @@ type TsInfo struct { Element string Offset period.Period Param StinfoParam - Span Timespan + Span utils.TimeSpan Logstr string IsOpen bool } @@ -54,9 +55,8 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo } // Check if data for this station/element is restricted - isOpen := cache.Permits.TimeseriesIsOpen(station, param.TypeID, param.ParamID) - // TODO: eventually use this to choose which table to use on insert + isOpen := cache.Permits.TimeseriesIsOpen(station, param.TypeID, param.ParamID) if !isOpen { slog.Warn(logstr + "Timeseries data is restricted") return nil, errors.New("Restricted data") @@ -66,7 +66,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo offset := cache.Offsets[key.Inner] // No need to check for `!ok`, timespan will be ignored if not in the map - span := cache.KDVH[key] + span, ok := cache.KDVH[key] label := lard.Label{ StationID: station, @@ -76,14 +76,14 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo Level: param.Hlevel, } - tsid, err := lard.GetTimeseriesID(&label, param.Fromtime, pool) + // TODO: are Param.Fromtime and Span.From different? + timespan := utils.TimeSpan{From: ¶m.Fromtime, To: span.To} + tsid, err := lard.GetTimeseriesID(&label, timespan, pool) if err != nil { slog.Error(logstr + "could not obtain timeseries - " + err.Error()) return nil, err } - // TODO: check if station is restricted - return &TsInfo{ Id: tsid, Station: station, diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index aceab4ce..e52c525d 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -173,9 +173,9 @@ func parseData(filename string, tsInfo *cache.TsInfo, convFunc ConvertFunction, } // Only import data between KDVH's defined fromtime and totime - if tsInfo.Span.FromTime != nil && obsTime.Sub(*tsInfo.Span.FromTime) < 0 { + if tsInfo.Span.From != nil && obsTime.Sub(*tsInfo.Span.From) < 0 { continue - } else if tsInfo.Span.ToTime != nil && obsTime.Sub(*tsInfo.Span.ToTime) > 0 { + } else if tsInfo.Span.To != nil && obsTime.Sub(*tsInfo.Span.To) > 0 { break } diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index ad4fda2d..fc645ab5 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -7,6 +7,7 @@ import ( "log/slog" "os" "path/filepath" + "strings" "sync" "github.com/gocarina/gocsv" @@ -65,7 +66,7 @@ func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, labe return nil } -func getLabels[T db.DataSeries | db.TextSeries](table db.Table[T], pool *pgxpool.Pool, config *Config) (labels []*db.Label, err error) { +func getLabels[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) (labels []*db.Label, err error) { labelFile := table.Path + "_labels.csv" if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { @@ -93,6 +94,9 @@ func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { } func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { + fmt.Println("Importing from " + table.Path) + defer fmt.Println(strings.Repeat("- ", 50)) + labels, err := getLabels(table, pool, config) if err != nil { return @@ -117,7 +121,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool // TODO: this bar is a bit deceiving if you don't dump all the labels // Maybe should only cache the ones requested from 
cli? - bar := utils.NewBar(len(labels), stationPath) + bar := utils.NewBar(len(labels), fmt.Sprint(station)) for _, label := range labels { wg.Add(1) @@ -167,14 +171,11 @@ func dumpDB(database db.DB, config *Config) { if config.ChosenTable(db.DATA_TABLE_NAME) { table := DataTable(path) - dumpTable(table, pool, config) } if config.ChosenTable(db.TEXT_TABLE_NAME) { table := TextTable(path) - utils.SetLogFile(table.Path, "dump") - dumpTable(table, pool, config) } } diff --git a/migrations/kvalobs/import/cache/main.go b/migrations/kvalobs/import/cache/main.go new file mode 100644 index 00000000..cb398ff9 --- /dev/null +++ b/migrations/kvalobs/import/cache/main.go @@ -0,0 +1,108 @@ +package cache + +import ( + "context" + "database/sql" + "errors" + "log/slog" + "os" + "time" + + "github.com/jackc/pgx/v5" + + "migrate/kvalobs/db" + "migrate/lard" + "migrate/utils" +) + +type KvalobsTimespan = map[MetaKey]utils.TimeSpan + +type Cache struct { + Meta KvalobsTimespan + Permits lard.PermitMaps +} + +func (c *Cache) GetSeriesTimespan(label *db.Label) (utils.TimeSpan, error) { + // First try to lookup timespan with both stationid and paramid + // TODO: should these timespans modify an existing timeseries in lard? + key := MetaKey{Stationid: label.StationID, Paramid: sql.NullInt32{Int32: label.ParamID, Valid: true}} + if timespan, ok := c.Meta[key]; ok { + return timespan, nil + } + + // Otherwise try with stationid only + key.Paramid = sql.NullInt32{} + if timespan, ok := c.Meta[key]; ok { + return timespan, nil + } + + // If there is no timespan we can't insert a new timeseries + return utils.TimeSpan{}, errors.New(label.LogStr() + "No timespan found, cannot create timeseries") +} + +func (c *Cache) TimeseriesIsOpen(stnr, typeid, paramid int32) bool { + return c.Permits.TimeseriesIsOpen(stnr, typeid, paramid) +} + +// In `station_metadata` only the stationid is required to be non-NULL +// Paramid can be optionally specified +// Typeid, sensor, and level column are all NULL, so they are not present in this struct +type MetaKey struct { + Stationid int32 + Paramid sql.NullInt32 +} + +func New(kvalobs db.DB) *Cache { + permits := lard.NewPermitTables() + timespans := cacheKvalobsTimeseriesTimespans(kvalobs) + return &Cache{Permits: permits, Meta: timespans} +} + +// Query kvalobs `station_metadata` table that stores timeseries timespans +func cacheKvalobsTimeseriesTimespans(kvalobs db.DB) KvalobsTimespan { + cache := make(KvalobsTimespan) + + slog.Info("Connecting to Stinfosys to cache metadata") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + conn, err := pgx.Connect(ctx, os.Getenv(kvalobs.ConnEnvVar)) + if err != nil { + slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) + os.Exit(1) + } + defer conn.Close(ctx) + + query := `SELECT stationid, paramid, fromtime, totime FROM station_metadata` + + rows, err := conn.Query(context.TODO(), query) + if err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + for rows.Next() { + var key MetaKey + var timespan utils.TimeSpan + + err := rows.Scan( + &key.Stationid, + &key.Paramid, + ×pan.From, + ×pan.To, + ) + if err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + cache[key] = timespan + } + + if rows.Err() != nil { + slog.Error(rows.Err().Error()) + os.Exit(1) + } + + return cache +} diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index c2464ab0..a8cbdbc7 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -7,17 +7,21 @@ import ( "os" "path/filepath" "strconv" + "strings" "sync" - "time" "github.com/jackc/pgx/v5/pgxpool" "migrate/kvalobs/db" + "migrate/kvalobs/import/cache" "migrate/lard" "migrate/utils" ) -func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *lard.PermitMaps, pool *pgxpool.Pool, config *Config) (int64, error) { +func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { + fmt.Println("Importing from " + table.Path) + defer fmt.Println(strings.Repeat("- ", 50)) + stations, err := os.ReadDir(table.Path) if err != nil { slog.Error(err.Error()) @@ -38,7 +42,7 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la continue } - bar := utils.NewBar(len(labels), stationDir) + bar := utils.NewBar(len(labels), fmt.Sprint(" "+station.Name())) var wg sync.WaitGroup for _, file := range labels { wg.Add(1) @@ -61,29 +65,35 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la labelStr := label.LogStr() // Check if data for this station/element is restricted - if !permits.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) { + if !cache.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) { // TODO: eventually use this to choose which table to use on insert slog.Warn(labelStr + "timeseries data is restricted, skipping") return } + timespan, err := cache.GetSeriesTimespan(label) + if err != nil { + slog.Error(labelStr + err.Error()) + return + } + lardLabel := lard.Label(*label) // TODO: figure out where to get fromtime, kvalobs directly? Stinfosys? 
- tsid, err := lard.GetTimeseriesID(&lardLabel, time.Now(), pool) + tsid, err := lard.GetTimeseriesID(&lardLabel, timespan, pool) if err != nil { - slog.Error(err.Error()) + slog.Error(labelStr + err.Error()) return } ts, flags, err := table.ReadCSV(tsid, filepath.Join(stationDir, file.Name())) if err != nil { - slog.Error(err.Error()) + slog.Error(labelStr + err.Error()) return } count, err := table.Import(ts, pool, labelStr) if err != nil { - slog.Error("Failed bulk insertion: " + err.Error()) + slog.Error(labelStr + "Failed bulk insertion: " + err.Error()) return } @@ -107,20 +117,20 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], permits *la // TODO: while importing we trust that kvalobs and stinfosys have the same // non scalar parameters, which might not be the case -func ImportDB(database db.DB, permits *lard.PermitMaps, pool *pgxpool.Pool, config *Config) { +func ImportDB(database db.DB, cache *cache.Cache, pool *pgxpool.Pool, config *Config) { path := filepath.Join(config.Path, database.Name) if config.ChosenTable(db.DATA_TABLE_NAME) { table := DataTable(path) utils.SetLogFile(table.Path, "import") - ImportTable(table, permits, pool, config) + ImportTable(table, cache, pool, config) } if config.ChosenTable(db.TEXT_TABLE_NAME) { table := TextTable(path) utils.SetLogFile(table.Path, "import") - ImportTable(table, permits, pool, config) + ImportTable(table, cache, pool, config) } } diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index 0f552edb..ab6d589d 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -9,6 +9,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "migrate/kvalobs/db" + "migrate/kvalobs/import/cache" "migrate/lard" ) @@ -17,7 +18,8 @@ type Config struct { } func (config *Config) Execute() error { - permits := lard.NewPermitTables() + kvalobs, histkvalobs := db.InitDBs() + cache := cache.New(kvalobs) pool, err := pgxpool.New(context.Background(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { @@ -25,14 +27,12 @@ func (config *Config) Execute() error { } defer pool.Close() - kvalobs, histkvalobs := db.InitDBs() - if config.ChosenDB(kvalobs.Name) { - ImportDB(kvalobs, permits, pool, config) + ImportDB(kvalobs, cache, pool, config) } if config.ChosenDB(histkvalobs.Name) { - ImportDB(histkvalobs, permits, pool, config) + ImportDB(histkvalobs, cache, pool, config) } return nil diff --git a/migrations/lard/permissions.go b/migrations/lard/permissions.go index ca99d58f..ebd4099d 100644 --- a/migrations/lard/permissions.go +++ b/migrations/lard/permissions.go @@ -28,7 +28,7 @@ type PermitMaps struct { StationPermits StationPermitMap } -func NewPermitTables() *PermitMaps { +func NewPermitTables() PermitMaps { slog.Info("Connecting to Stinfosys to cache permits") ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -40,7 +40,7 @@ func NewPermitTables() *PermitMaps { } defer conn.Close(ctx) - return &PermitMaps{ + return PermitMaps{ ParamPermits: cacheParamPermits(conn), StationPermits: cacheStationPermits(conn), } diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index 78cba036..13690f16 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -2,7 +2,8 @@ package lard import ( "context" - "time" + "errors" + "migrate/utils" "github.com/jackc/pgx/v5/pgxpool" ) @@ -23,7 +24,7 @@ func (l *Label) sensorLevelAreBothZero() bool { return *l.Level == 0 && *l.Sensor == 0 } -func GetTimeseriesID(label *Label, 
fromtime time.Time, pool *pgxpool.Pool) (tsid int32, err error) { +func GetTimeseriesID(label *Label, timespan utils.TimeSpan, pool *pgxpool.Pool) (tsid int32, err error) { // Query LARD labels table err = pool.QueryRow( context.TODO(), @@ -61,16 +62,21 @@ func GetTimeseriesID(label *Label, fromtime time.Time, pool *pgxpool.Pool) (tsid } } + if timespan.From == nil { + return tsid, errors.New("Fromtime should never be null when creating new timeseries") + } + // If none of the above worked insert a new timeseries transaction, err := pool.Begin(context.TODO()) if err != nil { return tsid, err } + // TODO: should we set `deactivated` to true if `totime` is not NULL? err = transaction.QueryRow( context.TODO(), - `INSERT INTO public.timeseries (fromtime) VALUES ($1) RETURNING id`, - fromtime, + `INSERT INTO public.timeseries (fromtime, totime) VALUES ($1, $2) RETURNING id`, + timespan.From, timespan.To, ).Scan(&tsid) if err != nil { return tsid, err diff --git a/migrations/tests/files/histkvalobs/text/18700/18700_1000_316__.csv b/migrations/tests/files/kvalobs/text/18700/18700_1000_316__.csv similarity index 100% rename from migrations/tests/files/histkvalobs/text/18700/18700_1000_316__.csv rename to migrations/tests/files/kvalobs/text/18700/18700_1000_316__.csv diff --git a/migrations/tests/kdvh_test.go b/migrations/tests/kdvh_test.go index 518840a0..25716586 100644 --- a/migrations/tests/kdvh_test.go +++ b/migrations/tests/kdvh_test.go @@ -1,4 +1,4 @@ -package main +package tests import ( "context" @@ -39,7 +39,7 @@ func (t *KdvhTestCase) mockConfig() (*port.Config, *cache.Cache) { IsScalar: true, }, }, - Permits: &lard.PermitMaps{ + Permits: lard.PermitMaps{ StationPermits: lard.StationPermitMap{ t.station: t.permit, }, diff --git a/migrations/tests/kvalobs_test.go b/migrations/tests/kvalobs_test.go index 77d635f8..e1b7d8de 100644 --- a/migrations/tests/kvalobs_test.go +++ b/migrations/tests/kvalobs_test.go @@ -1,16 +1,19 @@ -package main +package tests import ( "context" "log" "path/filepath" "testing" + "time" "github.com/jackc/pgx/v5/pgxpool" "migrate/kvalobs/db" port "migrate/kvalobs/import" + "migrate/kvalobs/import/cache" "migrate/lard" + "migrate/utils" ) const LARD_STRING string = "host=localhost user=postgres dbname=postgres password=postgres" @@ -27,14 +30,21 @@ type KvalobsTestCase struct { expectedRows int64 } -func (t *KvalobsTestCase) mockConfig() (*port.Config, *lard.PermitMaps) { +func (t *KvalobsTestCase) mockConfig() (*port.Config, *cache.Cache) { + fromtime, _ := time.Parse(time.DateOnly, "1900-01-01") return &port.Config{ BaseConfig: db.BaseConfig{ Stations: []int32{t.station}, }, - }, &lard.PermitMaps{ - StationPermits: lard.StationPermitMap{ - t.station: t.permit, + }, + &cache.Cache{ + Meta: map[cache.MetaKey]utils.TimeSpan{ + {Stationid: t.station}: {From: &fromtime}, + }, + Permits: lard.PermitMaps{ + StationPermits: lard.StationPermitMap{ + t.station: t.permit, + }, }, } } @@ -65,14 +75,13 @@ func TestImportDataKvalobs(t *testing.T) { } for _, c := range cases { - config, permits := c.mockConfig() - insertedRows, err := port.ImportTable(c.table, permits, pool, config) + config, cache := c.mockConfig() + insertedRows, err := port.ImportTable(c.table, cache, pool, config) switch { case err != nil: t.Fatal(err) case insertedRows != c.expectedRows: - // t.Log(insertedRows) t.Fail() } } @@ -97,16 +106,15 @@ func TestImportTextKvalobs(t *testing.T) { } defer pool.Close() - _, histkvalobs := db.InitDBs() + kvalobs, _ := db.InitDBs() cases := []KvalobsTextCase{ - // 
TextCase(KvalobsTestCase{db: kvalobs, station: 18700, paramid: 212, permit: 1, expectedRows: 100}), - TextCase(KvalobsTestCase{db: histkvalobs, station: 18700, permit: 1, expectedRows: 182}), + TextCase(KvalobsTestCase{db: kvalobs, station: 18700, permit: 1, expectedRows: 182}), } for _, c := range cases { - config, permits := c.mockConfig() - insertedRows, err := port.ImportTable(c.table, permits, pool, config) + config, cache := c.mockConfig() + insertedRows, err := port.ImportTable(c.table, cache, pool, config) switch { case err != nil: From b8868cfe544c92306426e3fc4a36e3b173ffd050 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 12:23:23 +0100 Subject: [PATCH 32/67] Fix formatting --- db/flags.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/flags.sql b/db/flags.sql index 432e54b2..289d6aa8 100644 --- a/db/flags.sql +++ b/db/flags.sql @@ -19,7 +19,7 @@ CREATE TABLE IF NOT EXISTS flags.old_databases ( corrected REAL NULL, controlinfo TEXT NULL, useinfo TEXT NULL, - cfailed TEXT NULL , + cfailed TEXT NULL, CONSTRAINT unique_old_flags_timeseries_obstime UNIQUE (timeseries, obstime) ); CREATE INDEX IF NOT EXISTS old_flags_obtime_index ON flags.old_databases (obstime); From 8806dba1e8838317e852ddc1bc78d979bbc957fa Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 12:24:46 +0100 Subject: [PATCH 33/67] Use const from different package --- migrations/kdvh/db/main.go | 1 - migrations/kdvh/import/cache/stinfosys.go | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/migrations/kdvh/db/main.go b/migrations/kdvh/db/main.go index 0c1967bb..d2fe9445 100644 --- a/migrations/kdvh/db/main.go +++ b/migrations/kdvh/db/main.go @@ -1,7 +1,6 @@ package db const KDVH_ENV_VAR string = "KDVH_PROXY_CONN" -const STINFO_ENV_VAR string = "STINFO_STRING" // Map of all tables found in KDVH, with set max import year type KDVH struct { diff --git a/migrations/kdvh/import/cache/stinfosys.go b/migrations/kdvh/import/cache/stinfosys.go index ed8b2f27..64c8d09c 100644 --- a/migrations/kdvh/import/cache/stinfosys.go +++ b/migrations/kdvh/import/cache/stinfosys.go @@ -10,6 +10,7 @@ import ( "github.com/jackc/pgx/v5" "migrate/kdvh/db" + "migrate/lard" ) // Map of metadata used to query timeseries ID in LARD @@ -39,7 +40,7 @@ func cacheStinfoMeta(tables, elements []string, kdvh *db.KDVH) StinfoMap { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - conn, err := pgx.Connect(ctx, os.Getenv(db.STINFO_ENV_VAR)) + conn, err := pgx.Connect(ctx, os.Getenv(lard.STINFO_ENV_VAR)) if err != nil { slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) os.Exit(1) From 5d6e541aadb1450e754357c9a3b4162f5c789f63 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 12:40:12 +0100 Subject: [PATCH 34/67] Use different function to check if value is empty or equal --- migrations/kvalobs/db/base_config.go | 10 ---------- migrations/kvalobs/dump/dump.go | 4 ++-- migrations/kvalobs/dump/main.go | 5 +++-- migrations/kvalobs/import/import.go | 4 ++-- migrations/kvalobs/import/main.go | 5 +++-- migrations/utils/utils.go | 4 ++++ 6 files changed, 14 insertions(+), 18 deletions(-) diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go index ff24bc18..743a8a99 100644 --- a/migrations/kvalobs/db/base_config.go +++ b/migrations/kvalobs/db/base_config.go @@ -36,13 +36,3 @@ func (config *BaseConfig) ShouldProcessLabel(label *Label) bool { func (config *BaseConfig) TimeSpan() *utils.TimeSpan { return &utils.TimeSpan{From: config.FromTime.Inner(), To: config.ToTime.Inner()} } - -// Check if the `--db` flag was passed in -func (config *BaseConfig) ChosenDB(name string) bool { - return config.Database == "" || config.Database == name -} - -// Check if the `--table` flag was passed in -func (config *BaseConfig) ChosenTable(name string) bool { - return config.Table == "" || config.Table == name -} diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index fc645ab5..c1cf6cbb 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -169,12 +169,12 @@ func dumpDB(database db.DB, config *Config) { return } - if config.ChosenTable(db.DATA_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, db.DATA_TABLE_NAME) { table := DataTable(path) dumpTable(table, pool, config) } - if config.ChosenTable(db.TEXT_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, db.TEXT_TABLE_NAME) { table := TextTable(path) dumpTable(table, pool, config) } diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index c631de65..ae641bf9 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -2,6 +2,7 @@ package dump import ( "migrate/kvalobs/db" + "migrate/utils" ) // Same timeseries could be in both 'data' and 'text_data' tables @@ -24,11 +25,11 @@ type Config struct { func (config *Config) Execute() { kvalobs, histkvalobs := db.InitDBs() - if config.ChosenDB(kvalobs.Name) { + if utils.IsEmptyOrEqual(config.Database, kvalobs.Name) { dumpDB(kvalobs, config) } - if config.ChosenDB(histkvalobs.Name) { + if utils.IsEmptyOrEqual(config.Database, histkvalobs.Name) { dumpDB(histkvalobs, config) } } diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index a8cbdbc7..87f3f8ba 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -120,14 +120,14 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach func ImportDB(database db.DB, cache *cache.Cache, pool *pgxpool.Pool, config *Config) { path := filepath.Join(config.Path, database.Name) - if config.ChosenTable(db.DATA_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, db.DATA_TABLE_NAME) { table := DataTable(path) utils.SetLogFile(table.Path, "import") ImportTable(table, cache, pool, config) } - if config.ChosenTable(db.TEXT_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, db.TEXT_TABLE_NAME) { table := TextTable(path) utils.SetLogFile(table.Path, "import") diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index ab6d589d..e37e441a 100644 --- 
a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -11,6 +11,7 @@ import ( "migrate/kvalobs/db" "migrate/kvalobs/import/cache" "migrate/lard" + "migrate/utils" ) type Config struct { @@ -27,11 +28,11 @@ func (config *Config) Execute() error { } defer pool.Close() - if config.ChosenDB(kvalobs.Name) { + if utils.IsEmptyOrEqual(config.Database, kvalobs.Name) { ImportDB(kvalobs, cache, pool, config) } - if config.ChosenDB(histkvalobs.Name) { + if utils.IsEmptyOrEqual(config.Database, histkvalobs.Name) { ImportDB(histkvalobs, cache, pool, config) } diff --git a/migrations/utils/utils.go b/migrations/utils/utils.go index 7035e253..554c1553 100644 --- a/migrations/utils/utils.go +++ b/migrations/utils/utils.go @@ -30,6 +30,10 @@ func NewBar(size int, description string) *progressbar.ProgressBar { ) } +func IsEmptyOrEqual(first, second string) bool { + return first == "" || first == second +} + // Filters elements of a slice by comparing them to the elements of a reference slice. // formatMsg is an optional format string with a single format argument that can be used // to add context on why the element may be missing from the reference slice From 08204fb2c79a215b1e4f80cd04822480083ff1ce Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:00:40 +0100 Subject: [PATCH 35/67] Filter out fake null values --- migrations/kvalobs/db/main.go | 4 +++- migrations/kvalobs/import/data.go | 27 +++++++++++++++++++-------- migrations/lard/main.go | 6 +++--- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 09282dc8..3c82815c 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -88,7 +88,9 @@ import ( const DATA_TABLE_NAME string = "data" const TEXT_TABLE_NAME string = "text" // text_data -var NULL_VALUES []float64 = []float64{-34767, -34766} +// Special values that are treated as NULL in Kvalobs +// TODO: are there more values we should be looking for? +var NULL_VALUES []float32 = []float32{-32767, -32766} type DataSeries = []*DataObs diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go index 183ba140..45b27b94 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/import/data.go @@ -5,6 +5,7 @@ import ( "log/slog" "os" "path/filepath" + "slices" "strconv" "strings" "time" @@ -39,6 +40,8 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { // Skip header reader.Scan() + var originalPtr, correctedPtr *float32 + // Parse observations data := make([][]any, 0, rowCount) flags := make([][]any, 0, rowCount) @@ -52,8 +55,6 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { return nil, nil, err } - // TODO: probably should insert corrected to data table - // and keep original in flags table? 
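The sentinel handling introduced in this hunk amounts to mapping Kvalobs' placeholder values to NULL before insertion. A minimal standalone sketch of the same idea (the helper name is made up; it assumes the NULL_VALUES slice defined above and the standard library slices package from Go 1.21+):

    // asNullable returns nil for values that Kvalobs uses as placeholders for
    // missing observations (e.g. -32767, -32766), otherwise a pointer to the
    // value, which maps directly to a nullable column on insertion.
    func asNullable(value float32, sentinels []float32) *float32 {
        if slices.Contains(sentinels, value) {
            return nil
        }
        return &value
    }

With such a helper, the assignments in the rest of this hunk would reduce to originalPtr = asNullable(original, db.NULL_VALUES) and correctedPtr = asNullable(corrected, db.NULL_VALUES).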
obsvalue64, err := strconv.ParseFloat(fields[1], 32) if err != nil { return nil, nil, err @@ -64,26 +65,36 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { return nil, nil, err } - obsvalue := float32(obsvalue64) + // Filter out special values that in Kvalobs stand for null observations + original := float32(obsvalue64) + if !slices.Contains(db.NULL_VALUES, original) { + originalPtr = &original + } + corrected := float32(corrected64) + if !slices.Contains(db.NULL_VALUES, corrected) { + correctedPtr = &corrected + } + // Corrected value is inserted in main data table lardObs := lard.DataObs{ Id: tsid, Obstime: obstime, - Data: &obsvalue, + Data: correctedPtr, } - var cfailed *string = nil + var cfailed *string if fields[6] != "" { cfailed = &fields[6] } + // Original value is saved in flag table flag := lard.Flag{ Id: tsid, Obstime: obstime, - Corrected: &corrected, - Controlinfo: &fields[4], - Useinfo: &fields[5], + Original: originalPtr, + Controlinfo: &fields[4], // Never null + Useinfo: &fields[5], // Never null Cfailed: cfailed, } diff --git a/migrations/lard/main.go b/migrations/lard/main.go index fe5fe558..435c2ad0 100644 --- a/migrations/lard/main.go +++ b/migrations/lard/main.go @@ -38,8 +38,8 @@ type Flag struct { Id int32 // Time of observation Obstime time.Time - // Corrected value after QC tests - Corrected *float32 + // Original value after QC tests + Original *float32 // Flag encoding quality control status Controlinfo *string // Flag encoding quality control status @@ -50,5 +50,5 @@ type Flag struct { func (o *Flag) ToRow() []any { // "timeseries", "obstime", "corrected","controlinfo", "useinfo", "cfailed" - return []any{o.Id, o.Obstime, o.Corrected, o.Controlinfo, o.Useinfo, o.Cfailed} + return []any{o.Id, o.Obstime, o.Original, o.Controlinfo, o.Useinfo, o.Cfailed} } From 58064ccc60b081413f3176daff2e47fd8a983a8f Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:00:46 +0100 Subject: [PATCH 36/67] Update docs --- migrations/kvalobs/db/main.go | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 3c82815c..b53ed2bc 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -9,8 +9,8 @@ import ( // 2) `histkvalobs` for data older than // // Both contain the same tables: -// - `algorithms`: empty (???) - stores procedure info for QC checks -// - `checks`: empty (???) +// - `algorithms`: stores procedure code (!!!) 
for QC checks +// - `checks`: stores tags and signatures of QC tests // - `data`: stores numerical observations, associated metadata, and QC info // // Column | Type | Collation | Nullable | Default @@ -29,25 +29,9 @@ import ( // cfailed | text | | | // // - `default_missing`: -// - `default_missing_values`: -// -// - `model`: -// Column | Type | Collation | Nullable | Default -// ---------+---------+-----------+----------+--------- -// modelid | integer | | not null | -// name | text | | | -// comment | text | | | -// -// - `model_data`: -// Column | Type | Collation | Nullable | Default -// -----------+-----------------------------+-----------+----------+--------- -// stationid | integer | | not null | -// obstime | timestamp without time zone | | not null | -// paramid | integer | | not null | -// level | integer | | not null | -// modelid | integer | | not null | -// original | double precision | | | -// +// - `default_missing_values`: default values for some paramids (-32767) +// - `model`: stores model names +// - `model_data`: stores model data for different stations, paramids, etc. // - `param`: part of stinfosys `param` table // Column | Type | Collation | Nullable | Default // -------------+---------+-----------+----------+--------- @@ -59,10 +43,11 @@ import ( // comment | text | | | // scalar | boolean | | | true // -// - `pdata`: same as `data` without the `original` column and all `paramid` null??? +// TODO: should we dump this one as well? +// - `pdata`: same structure as data? // - `station`: station metadata such as (lat, lon, height, name, wmonr, etc) -// - `station_metadata`: this one seems to map well to our `labels.met`? -// Problem is `typeid`, `sensor`, and `level` are always NULL +// - `station_metadata`: Stores fromtime and totime for `stationid` and optionally `paramid`. +// `typeid`, `sensor`, and `level` are always NULL. // // - `text_data`: Similar to `data`, but without QC info nor sensor/level // @@ -75,7 +60,7 @@ import ( // tbtime | timestamp without time zone | | not null | // typeid | integer | | not null | // -// In `histkvalobs` only data tables seem to be non-empty +// NOTE: In `histkvalobs` only `data` and `text_data` are non-empty. 
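To make the station_metadata description a few lines above concrete, reading the stored timespans could look roughly like the sketch below. This is a guess based only on the column names mentioned in this comment; the function name and the exact WHERE clause are not taken from the repository, and it assumes the context, time and github.com/jackc/pgx/v5 imports.

    // fetchStationTimespan is a hypothetical helper that reads fromtime/totime
    // for a station-wide entry (paramid IS NULL) from Kvalobs' station_metadata,
    // which an importer could use when creating a new LARD timeseries.
    func fetchStationTimespan(ctx context.Context, conn *pgx.Conn, stationid int32) (from, to *time.Time, err error) {
        err = conn.QueryRow(ctx,
            `SELECT fromtime, totime FROM station_metadata
              WHERE stationid = $1 AND paramid IS NULL`,
            stationid,
        ).Scan(&from, &to)
        return from, to, err
    }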
// // IMPORTANT: considerations for migrations to LARD // - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table From d9dca14ed6b650f622d9e013358db261660a2bd3 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:22:36 +0100 Subject: [PATCH 37/67] Prettify --- migrations/kdvh/dump/dump.go | 19 ++++++++++------ migrations/kdvh/dump/dump_functions.go | 6 ++--- migrations/kdvh/import/import.go | 31 +++++++++++++++----------- migrations/kvalobs/dump/dump.go | 2 +- migrations/kvalobs/import/import.go | 4 ++-- migrations/utils/utils.go | 4 ++-- 6 files changed, 38 insertions(+), 28 deletions(-) diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index 59619be4..87e3e88f 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -20,6 +20,9 @@ import ( var INVALID_COLUMNS = []string{"dato", "stnr", "typeid", "season", "xxx"} func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { + fmt.Printf("Dumping %s...\n", table.TableName) + defer fmt.Println(strings.Repeat("- ", 50)) + if err := os.MkdirAll(filepath.Join(config.Path, table.Path), os.ModePerm); err != nil { slog.Error(err.Error()) return @@ -40,23 +43,26 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { // Used to limit connections to the database semaphore := make(chan struct{}, config.MaxConn) - bar := utils.NewBar(len(stations), table.TableName) - bar.RenderBlank() for _, station := range stations { - path := filepath.Join(config.Path, table.Path, string(station)) + path := filepath.Join(config.Path, table.Path, station) if err := os.MkdirAll(path, os.ModePerm); err != nil { slog.Error(err.Error()) return } + bar := utils.NewBar(len(elements), fmt.Sprint(" "+station)) + var wg sync.WaitGroup for _, element := range elements { + wg.Add(1) + // This blocks if the channel is full semaphore <- struct{}{} - - wg.Add(1) go func() { - defer wg.Done() + defer func() { + wg.Done() + bar.Add(1) + }() err := dumpFunc( path, @@ -78,7 +84,6 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { }() } wg.Wait() - bar.Add(1) } } diff --git a/migrations/kdvh/dump/dump_functions.go b/migrations/kdvh/dump/dump_functions.go index db6fb82f..1ad0989b 100644 --- a/migrations/kdvh/dump/dump_functions.go +++ b/migrations/kdvh/dump/dump_functions.go @@ -41,7 +41,7 @@ func fileExists(filename string, overwrite bool) error { if _, err := os.Stat(filename); err == nil && !overwrite { return errors.New( fmt.Sprintf( - "Skipping dump of '%s' because dumped file already exists and the --overwrite flag was not provided", + "Skipping dump of %q because dumped file already exists and the --overwrite flag was not provided", filename, )) } @@ -59,12 +59,12 @@ func fetchYearRange(tableName, station string, pool *pgxpool.Pool) (int64, int64 begin, err := strconv.ParseInt(beginStr, 10, 64) if err != nil { - return 0, 0, fmt.Errorf("Could not parse year '%s': %s", beginStr, err) + return 0, 0, fmt.Errorf("Could not parse year %q: %s", beginStr, err) } end, err := strconv.ParseInt(endStr, 10, 64) if err != nil { - return 0, 0, fmt.Errorf("Could not parse year '%s': %s", endStr, err) + return 0, 0, fmt.Errorf("Could not parse year %q: %s", endStr, err) } return begin, end, nil diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index e52c525d..ded4db26 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -25,6 +25,9 @@ import ( var INVALID_ELEMENTS = 
[]string{"TYPEID", "TAM_NORMAL_9120", "RRA_NORMAL_9120", "OT", "OTN", "OTX", "DD06", "DD12", "DD18"} func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { + fmt.Printf("Importing %s...\n", table.TableName) + defer fmt.Println(strings.Repeat("- ", 50)) + stations, err := os.ReadDir(filepath.Join(config.Path, table.Path)) if err != nil { slog.Warn(err.Error()) @@ -49,21 +52,23 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config continue } - bar := utils.NewBar(len(elements), stationDir) + bar := utils.NewBar(len(elements), fmt.Sprint(" "+station.Name())) var wg sync.WaitGroup for _, element := range elements { - bar.Add(1) - elemCode, err := getElementCode(element, config.Elements) - if err != nil { - if config.Verbose { - slog.Info(err.Error()) - } - continue - } - wg.Add(1) go func() { - defer wg.Done() + defer func() { + wg.Done() + bar.Add(1) + }() + + elemCode, err := getElementCode(element, config.Elements) + if err != nil { + if config.Verbose { + slog.Info(err.Error()) + } + return + } tsInfo, err := cache.NewTsInfo(table.TableName, elemCode, stnr, pool) if err != nil { @@ -132,11 +137,11 @@ func getElementCode(element os.DirEntry, elementList []string) (string, error) { elemCode := strings.ToUpper(strings.TrimSuffix(element.Name(), ".csv")) if len(elementList) > 0 && !slices.Contains(elementList, elemCode) { - return "", errors.New(fmt.Sprintf("Element '%s' not in the list, skipping", elemCode)) + return "", errors.New(fmt.Sprintf("Element %q not in the list, skipping", elemCode)) } if elemcodeIsInvalid(elemCode) { - return "", errors.New(fmt.Sprintf("Element '%s' not set for import, skipping", elemCode)) + return "", errors.New(fmt.Sprintf("Element %q not set for import, skipping", elemCode)) } return elemCode, nil } diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index c1cf6cbb..27a77b81 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -94,7 +94,7 @@ func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { } func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { - fmt.Println("Importing from " + table.Path) + fmt.Printf("Dumping to %q...\n", table.Path) defer fmt.Println(strings.Repeat("- ", 50)) labels, err := getLabels(table, pool, config) diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 87f3f8ba..eb4d0fdc 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -19,7 +19,7 @@ import ( ) func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { - fmt.Println("Importing from " + table.Path) + fmt.Printf("Importing from %q...\n", table.Path) defer fmt.Println(strings.Repeat("- ", 50)) stations, err := os.ReadDir(table.Path) @@ -93,7 +93,7 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach count, err := table.Import(ts, pool, labelStr) if err != nil { - slog.Error(labelStr + "Failed bulk insertion: " + err.Error()) + slog.Error(labelStr + "Failed bulk insertion - " + err.Error()) return } diff --git a/migrations/utils/utils.go b/migrations/utils/utils.go index 554c1553..a2f7c8c3 100644 --- a/migrations/utils/utils.go +++ b/migrations/utils/utils.go @@ -43,7 +43,7 @@ func FilterSlice[T comparable](slice, reference []T, formatMsg string) []T { } if formatMsg == "" { - 
formatMsg = "Value '%s' not present in reference slice, skipping" + formatMsg = "Value '%v' not present in reference slice, skipping" } // I hate this so much @@ -72,7 +72,7 @@ func SetLogFile(table, procedure string) { filename := fmt.Sprintf("%s_%s_log.txt", table, procedure) fh, err := os.Create(filename) if err != nil { - slog.Error(fmt.Sprintf("Could not create log '%s': %s", filename, err)) + slog.Error(fmt.Sprintf("Could not create log %q: %s", filename, err)) return } log.SetOutput(fh) From 4055dbe40aa7858c23a1ba2622c610e7e319b6c2 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:22:43 +0100 Subject: [PATCH 38/67] Rework comments --- migrations/kvalobs/db/base_config.go | 1 - migrations/kvalobs/dump/main.go | 13 +++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go index 743a8a99..8a301ac5 100644 --- a/migrations/kvalobs/db/base_config.go +++ b/migrations/kvalobs/db/base_config.go @@ -24,7 +24,6 @@ type BaseConfig struct { } func (config *BaseConfig) ShouldProcessLabel(label *Label) bool { - // (config.Ts == nil || slices.Contains(config.Ts, ts.ID)) || return utils.IsEmptyOrContains(config.ParamIds, label.ParamID) && // utils.IsEmptyOrContains(config.Stations, label.StationID) && utils.IsEmptyOrContains(config.TypeIds, label.TypeID) && diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index ae641bf9..e2530739 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -5,16 +5,9 @@ import ( "migrate/utils" ) -// Same timeseries could be in both 'data' and 'text_data' tables -// First of all, why? -// Second, do we care? -// func readDataAndText(label *lard.Label, pool *pgxpool.Pool, config *DumpConfig) Data { -// // Supposed to join text anf number data to single slice -// return nil -// } -// -// TODO: not sure what to do with this one -// func joinTS(first, second []lard.Label) +// TODO: there were some comments in the original script about +// the fact that the same timeseries could be in both +// 'data' and 'text_data' type Config struct { db.BaseConfig From 02f0d587ed34220407a061e94667642a559aac51 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:28:36 +0100 Subject: [PATCH 39/67] Unify env variable naming --- migrations/kdvh/db/main.go | 2 +- migrations/lard/main.go | 2 +- migrations/lard/permissions.go | 2 +- migrations/main.go | 10 +++++++--- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/migrations/kdvh/db/main.go b/migrations/kdvh/db/main.go index d2fe9445..698d4068 100644 --- a/migrations/kdvh/db/main.go +++ b/migrations/kdvh/db/main.go @@ -1,6 +1,6 @@ package db -const KDVH_ENV_VAR string = "KDVH_PROXY_CONN" +const KDVH_ENV_VAR string = "KDVH_PROXY_CONN_STRING" // Map of all tables found in KDVH, with set max import year type KDVH struct { diff --git a/migrations/lard/main.go b/migrations/lard/main.go index 435c2ad0..82e2bbce 100644 --- a/migrations/lard/main.go +++ b/migrations/lard/main.go @@ -2,7 +2,7 @@ package lard import "time" -const LARD_ENV_VAR string = "LARD_STRING" +const LARD_ENV_VAR string = "LARD_CONN_STRING" // Struct mimicking the `public.data` table type DataObs struct { diff --git a/migrations/lard/permissions.go b/migrations/lard/permissions.go index ebd4099d..b0b7df5e 100644 --- a/migrations/lard/permissions.go +++ b/migrations/lard/permissions.go @@ -9,7 +9,7 @@ import ( "github.com/jackc/pgx/v5" ) -const STINFO_ENV_VAR string = 
"STINFO_STRING" +const STINFO_ENV_VAR string = "STINFO_CONN_STRING" type StationId = int32 type PermitId = int32 diff --git a/migrations/main.go b/migrations/main.go index 2d7f5c26..59d4790c 100644 --- a/migrations/main.go +++ b/migrations/main.go @@ -20,12 +20,16 @@ type CmdArgs struct { func main() { log.SetFlags(log.LstdFlags | log.Lshortfile) - // The following env variables are needed: + // The following env variables are required: // 1. Dump - // - kdvh: "KDVH_PROXY_CONN" + // - kdvh: "KDVH_CONN_STRING" + // - kvalobs: "KVALOBS_CONN_STRING", "HISTKVALOBS_CONN_STRING" // // 2. Import - // - kdvh: "LARD_STRING", "STINFO_STRING", "KDVH_PROXY_CONN" + // - kdvh: "LARD_CONN_STRING", "STINFO_CONN_STRING", "KDVH_CONN_STRING" + // - kvalobs: "LARD_CONN_STRING", "STINFO_CONN_STRING", "KVALOBS_CONN_STRING" + // + // NOTE: KDVH_CONN_STRING refers to the proxy err := godotenv.Load() if err != nil { fmt.Println(err) From e55f887acd20905a32b4afbcb9c2acac3f5b4c5c Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:43:40 +0100 Subject: [PATCH 40/67] Need to create the station directory --- migrations/kvalobs/dump/dump.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 27a77b81..3a1a5014 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -119,6 +119,11 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool continue } + if err := os.MkdirAll(stationPath, os.ModePerm); err != nil { + slog.Error(err.Error()) + return + } + // TODO: this bar is a bit deceiving if you don't dump all the labels // Maybe should only cache the ones requested from cli? bar := utils.NewBar(len(labels), fmt.Sprint(station)) From fcd1696754261455cc0237fc66e7516868bc2d86 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:43:57 +0100 Subject: [PATCH 41/67] More visual fixes --- migrations/kdvh/dump/dump.go | 7 ++++++- migrations/kdvh/import/import.go | 8 +++++++- migrations/kvalobs/dump/dump.go | 12 +++++++----- migrations/kvalobs/import/import.go | 8 ++++++-- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index 87e3e88f..7e23ab6d 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -3,6 +3,7 @@ package dump import ( "context" "fmt" + "log" "log/slog" "os" "path/filepath" @@ -21,7 +22,10 @@ var INVALID_COLUMNS = []string{"dato", "stnr", "typeid", "season", "xxx"} func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { fmt.Printf("Dumping %s...\n", table.TableName) - defer fmt.Println(strings.Repeat("- ", 50)) + defer func() { + fmt.Println(strings.Repeat("- ", 50)) + log.SetOutput(os.Stdout) + }() if err := os.MkdirAll(filepath.Join(config.Path, table.Path), os.ModePerm); err != nil { slog.Error(err.Error()) @@ -51,6 +55,7 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { } bar := utils.NewBar(len(elements), fmt.Sprint(" "+station)) + bar.RenderBlank() var wg sync.WaitGroup for _, element := range elements { diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index ded4db26..9e9d77f8 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "log" "log/slog" "os" "path/filepath" @@ -26,7 +27,10 @@ var INVALID_ELEMENTS = []string{"TYPEID", "TAM_NORMAL_9120", "RRA_NORMAL_9120", func ImportTable(table *db.Table, cache 
*cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { fmt.Printf("Importing %s...\n", table.TableName) - defer fmt.Println(strings.Repeat("- ", 50)) + defer func() { + fmt.Println(strings.Repeat("- ", 50)) + log.SetOutput(os.Stdout) + }() stations, err := os.ReadDir(filepath.Join(config.Path, table.Path)) if err != nil { @@ -53,6 +57,8 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config } bar := utils.NewBar(len(elements), fmt.Sprint(" "+station.Name())) + bar.RenderBlank() + var wg sync.WaitGroup for _, element := range elements { wg.Add(1) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 3a1a5014..d833ad64 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -94,8 +94,12 @@ func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { } func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { + utils.SetLogFile(table.Path, "dump") fmt.Printf("Dumping to %q...\n", table.Path) - defer fmt.Println(strings.Repeat("- ", 50)) + defer func() { + fmt.Println(strings.Repeat("- ", 50)) + log.SetOutput(os.Stdout) + }() labels, err := getLabels(table, pool, config) if err != nil { @@ -105,9 +109,6 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool stationMap := getStationLabelMap(labels) timespan := config.TimeSpan() - utils.SetLogFile(table.Path, "dump") - defer log.SetOutput(os.Stdout) - // Used to limit connections to the database semaphore := make(chan struct{}, config.MaxConn) var wg sync.WaitGroup @@ -126,7 +127,8 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool // TODO: this bar is a bit deceiving if you don't dump all the labels // Maybe should only cache the ones requested from cli? 
- bar := utils.NewBar(len(labels), fmt.Sprint(station)) + bar := utils.NewBar(len(labels), fmt.Sprintf(" %v", station)) + bar.RenderBlank() for _, label := range labels { wg.Add(1) diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index eb4d0fdc..403b65e1 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -20,7 +20,10 @@ import ( func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { fmt.Printf("Importing from %q...\n", table.Path) - defer fmt.Println(strings.Repeat("- ", 50)) + defer func() { + fmt.Println(strings.Repeat("- ", 50)) + log.SetOutput(os.Stdout) + }() stations, err := os.ReadDir(table.Path) if err != nil { @@ -43,6 +46,8 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach } bar := utils.NewBar(len(labels), fmt.Sprint(" "+station.Name())) + bar.RenderBlank() + var wg sync.WaitGroup for _, file := range labels { wg.Add(1) @@ -111,7 +116,6 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach slog.Info(outputStr) fmt.Println(outputStr) - log.SetOutput(os.Stdout) return rowsInserted, nil } From 55c5b4c55c17b112e91e9ad9e30c2aaf039c341e Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 13:57:29 +0100 Subject: [PATCH 42/67] Need to call bar.Add before wg.Done --- migrations/kdvh/dump/dump.go | 4 ++-- migrations/kdvh/import/import.go | 4 ++-- migrations/kvalobs/dump/dump.go | 4 ++-- migrations/kvalobs/import/cache/main.go | 4 ++-- migrations/kvalobs/import/import.go | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index 7e23ab6d..b1fc0c7f 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -54,7 +54,7 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { return } - bar := utils.NewBar(len(elements), fmt.Sprint(" "+station)) + bar := utils.NewBar(len(elements), fmt.Sprintf("%10s", station)) bar.RenderBlank() var wg sync.WaitGroup @@ -65,8 +65,8 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { semaphore <- struct{}{} go func() { defer func() { - wg.Done() bar.Add(1) + wg.Done() }() err := dumpFunc( diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index 9e9d77f8..30d70b84 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -56,7 +56,7 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config continue } - bar := utils.NewBar(len(elements), fmt.Sprint(" "+station.Name())) + bar := utils.NewBar(len(elements), fmt.Sprintf("%10s", station.Name())) bar.RenderBlank() var wg sync.WaitGroup @@ -64,8 +64,8 @@ func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config wg.Add(1) go func() { defer func() { - wg.Done() bar.Add(1) + wg.Done() }() elemCode, err := getElementCode(element, config.Elements) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index d833ad64..7d36c6da 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -127,7 +127,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool // TODO: this bar is a bit deceiving if you don't dump all the labels // Maybe should only cache the ones requested from cli? 
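The reordering in this commit, calling bar.Add before wg.Done inside the deferred cleanup, presumably guarantees that the progress bar has been updated for every job before wg.Wait() returns and the loop moves on to the next station. A standalone toy version of the worker pattern used by these dump/import loops (not code from the repository; it only assumes the schollz/progressbar/v3 package that utils.NewBar already wraps):

    package main

    import (
        "sync"

        "github.com/schollz/progressbar/v3"
    )

    func main() {
        const jobs = 100
        bar := progressbar.Default(jobs)
        semaphore := make(chan struct{}, 4) // cap concurrent workers, like the MaxConn setting

        var wg sync.WaitGroup
        for i := 0; i < jobs; i++ {
            wg.Add(1)
            semaphore <- struct{}{} // blocks while the channel is full
            go func() {
                defer func() {
                    bar.Add(1) // update the bar first...
                    wg.Done()  // ...so Wait() can only return after every Add has run
                    <-semaphore
                }()
                // per-label work would go here
            }()
        }
        wg.Wait()
    }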
- bar := utils.NewBar(len(labels), fmt.Sprintf(" %v", station)) + bar := utils.NewBar(len(labels), fmt.Sprintf("%10d", station)) bar.RenderBlank() for _, label := range labels { @@ -136,8 +136,8 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool go func() { defer func() { - wg.Done() bar.Add(1) + wg.Done() // Release semaphore <-semaphore }() diff --git a/migrations/kvalobs/import/cache/main.go b/migrations/kvalobs/import/cache/main.go index cb398ff9..f1b7e5f6 100644 --- a/migrations/kvalobs/import/cache/main.go +++ b/migrations/kvalobs/import/cache/main.go @@ -62,13 +62,13 @@ func New(kvalobs db.DB) *Cache { func cacheKvalobsTimeseriesTimespans(kvalobs db.DB) KvalobsTimespan { cache := make(KvalobsTimespan) - slog.Info("Connecting to Stinfosys to cache metadata") + slog.Info("Connecting to Kvalobs to cache metadata") ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() conn, err := pgx.Connect(ctx, os.Getenv(kvalobs.ConnEnvVar)) if err != nil { - slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. " + err.Error()) + slog.Error("Could not connect to Kvalobs. Make sure to be connected to the VPN. " + err.Error()) os.Exit(1) } defer conn.Close(ctx) diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 403b65e1..21c3baf5 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -45,7 +45,7 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach continue } - bar := utils.NewBar(len(labels), fmt.Sprint(" "+station.Name())) + bar := utils.NewBar(len(labels), fmt.Sprintf("%10s", station.Name())) bar.RenderBlank() var wg sync.WaitGroup @@ -53,8 +53,8 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach wg.Add(1) go func() { defer func() { - wg.Done() bar.Add(1) + wg.Done() }() label, err := db.LabelFromFilename(file.Name()) From 733aa10c2f80a5f55e2481297688f4aa8b5072bd Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 14:01:41 +0100 Subject: [PATCH 43/67] Insert timeseries even if fromtime is null --- migrations/kvalobs/import/cache/main.go | 7 ++++--- migrations/lard/timeseries.go | 5 ----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/migrations/kvalobs/import/cache/main.go b/migrations/kvalobs/import/cache/main.go index f1b7e5f6..65a90e6d 100644 --- a/migrations/kvalobs/import/cache/main.go +++ b/migrations/kvalobs/import/cache/main.go @@ -3,7 +3,6 @@ package cache import ( "context" "database/sql" - "errors" "log/slog" "os" "time" @@ -36,8 +35,10 @@ func (c *Cache) GetSeriesTimespan(label *db.Label) (utils.TimeSpan, error) { return timespan, nil } - // If there is no timespan we can't insert a new timeseries - return utils.TimeSpan{}, errors.New(label.LogStr() + "No timespan found, cannot create timeseries") + // If there is no timespan we insert null fromtime and totime + // TODO: is this really what we want to do? + // Is there another place where to find this information? 
+ return utils.TimeSpan{}, nil } func (c *Cache) TimeseriesIsOpen(stnr, typeid, paramid int32) bool { diff --git a/migrations/lard/timeseries.go b/migrations/lard/timeseries.go index 13690f16..185cc51d 100644 --- a/migrations/lard/timeseries.go +++ b/migrations/lard/timeseries.go @@ -2,7 +2,6 @@ package lard import ( "context" - "errors" "migrate/utils" "github.com/jackc/pgx/v5/pgxpool" @@ -62,10 +61,6 @@ func GetTimeseriesID(label *Label, timespan utils.TimeSpan, pool *pgxpool.Pool) } } - if timespan.From == nil { - return tsid, errors.New("Fromtime should never be null when creating new timeseries") - } - // If none of the above worked insert a new timeseries transaction, err := pool.Begin(context.TODO()) if err != nil { From cd49eb5dc6dcf0b89e84edf98f1348f81ec5b4cd Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 14:02:21 +0100 Subject: [PATCH 44/67] Move constructor close to struct --- migrations/kvalobs/import/cache/main.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/migrations/kvalobs/import/cache/main.go b/migrations/kvalobs/import/cache/main.go index 65a90e6d..bc368006 100644 --- a/migrations/kvalobs/import/cache/main.go +++ b/migrations/kvalobs/import/cache/main.go @@ -21,6 +21,12 @@ type Cache struct { Permits lard.PermitMaps } +func New(kvalobs db.DB) *Cache { + permits := lard.NewPermitTables() + timespans := cacheKvalobsTimeseriesTimespans(kvalobs) + return &Cache{Permits: permits, Meta: timespans} +} + func (c *Cache) GetSeriesTimespan(label *db.Label) (utils.TimeSpan, error) { // First try to lookup timespan with both stationid and paramid // TODO: should these timespans modify an existing timeseries in lard? @@ -53,12 +59,6 @@ type MetaKey struct { Paramid sql.NullInt32 } -func New(kvalobs db.DB) *Cache { - permits := lard.NewPermitTables() - timespans := cacheKvalobsTimeseriesTimespans(kvalobs) - return &Cache{Permits: permits, Meta: timespans} -} - // Query kvalobs `station_metadata` table that stores timeseries timespans func cacheKvalobsTimeseriesTimespans(kvalobs db.DB) KvalobsTimespan { cache := make(KvalobsTimespan) From b6d30507fb020c4ac67056181830068d5886da82 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 2 Dec 2024 15:53:00 +0100 Subject: [PATCH 45/67] Update docs --- migrations/kvalobs/db/main.go | 49 ++++++++++++++++++++++++++++------- migrations/main.go | 6 ++--- 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index b53ed2bc..e7636615 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -11,7 +11,7 @@ import ( // Both contain the same tables: // - `algorithms`: stores procedure code (!!!) for QC checks // - `checks`: stores tags and signatures of QC tests -// - `data`: stores numerical observations, associated metadata, and QC info +// - `data`: a view that joins `observations` and `obsvalue` // // Column | Type | Collation | Nullable | Default // -------------+-----------------------------+-----------+----------+---------------------------- @@ -32,6 +32,36 @@ import ( // - `default_missing_values`: default values for some paramids (-32767) // - `model`: stores model names // - `model_data`: stores model data for different stations, paramids, etc. 
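For the `data` view described in the hunk above, a rough sketch of pulling one label's rows out of it, using only columns documented in this file (the real dump queries are not part of this hunk and may differ; sensor/level filtering and the requested time span are left out, and the pgx/v5 and pgxpool imports used elsewhere in these migrations are assumed):

    // querySeriesRows is a hypothetical helper showing the shape of such a query.
    func querySeriesRows(ctx context.Context, pool *pgxpool.Pool, stationid, paramid, typeid int32) (pgx.Rows, error) {
        return pool.Query(ctx,
            `SELECT obstime, original, corrected, controlinfo, useinfo, cfailed
               FROM data
              WHERE stationid = $1 AND paramid = $2 AND typeid = $3
              ORDER BY obstime`,
            stationid, paramid, typeid,
        )
    }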
+// +// - `observations`: stores sequential observation IDs for each observations (note the lack of paramid) +// Column | Type | Collation | Nullable | +// ---------------+-----------------------------+-----------+----------+ +// observationid | bigint | | not null | +// stationid | integer | | not null | +// typeid | integer | | not null | +// obstime | timestamp without time zone | | not null | +// tbtime | timestamp without time zone | | not null | +// +// - `obsdata`: where the actual scalar data is stored +// Column | Type | Collation | Nullable | Default +// ---------------+------------------+-----------+----------+---------------------------- +// observationid | bigint | | | +// original | double precision | | not null | +// paramid | integer | | not null | +// sensor | character(1) | | | '0'::bpchar +// level | integer | | | 0 +// corrected | double precision | | not null | +// controlinfo | character(16) | | | '0000000000000000'::bpchar +// useinfo | character(16) | | | '0000000000000000'::bpchar +// cfailed | text | | | +// +// - `obstextdata`: where the actual text data is stored +// Column | Type | Collation | Nullable | Default | +// ---------------+---------+-----------+----------+---------+ +// observationid | bigint | | | | +// original | text | | not null | | +// paramid | integer | | not null | | +// // - `param`: part of stinfosys `param` table // Column | Type | Collation | Nullable | Default // -------------+---------+-----------+----------+--------- @@ -43,13 +73,12 @@ import ( // comment | text | | | // scalar | boolean | | | true // -// TODO: should we dump this one as well? -// - `pdata`: same structure as data? +// - `pdata`: view similar to `data` but with paramid converted to param code // - `station`: station metadata such as (lat, lon, height, name, wmonr, etc) // - `station_metadata`: Stores fromtime and totime for `stationid` and optionally `paramid`. // `typeid`, `sensor`, and `level` are always NULL. // -// - `text_data`: Similar to `data`, but without QC info nor sensor/level +// - `text_data`: view that joins `observations` and `obstextdata` // // Column | Type | Collation | Nullable | Default // -----------+-----------------------------+-----------+----------+--------- @@ -63,12 +92,12 @@ import ( // NOTE: In `histkvalobs` only `data` and `text_data` are non-empty. // // IMPORTANT: considerations for migrations to LARD -// - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table -// - (sensor, level) can be NULL, while in Kvalobs they have default values (0,0) -// => POSSIBLE INCONSISTENCY when importing to LARD -// - Timestamps are UTC -// - Kvalobs doesn't have the concept of timeseries ID, -// instead there is a sequential ID associated with each observation row +// - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table +// - (sensor, level) can be NULL, while in Kvalobs they have default values (0,0) +// => POSSIBLE INCONSISTENCY when importing to LARD +// - Timestamps are UTC +// - Kvalobs doesn't have the concept of timeseries ID, +// instead there is a sequential ID associated with each observation row const DATA_TABLE_NAME string = "data" const TEXT_TABLE_NAME string = "text" // text_data diff --git a/migrations/main.go b/migrations/main.go index 59d4790c..84d9eaa5 100644 --- a/migrations/main.go +++ b/migrations/main.go @@ -22,14 +22,12 @@ func main() { // The following env variables are required: // 1. 
Dump - // - kdvh: "KDVH_CONN_STRING" + // - kdvh: "KDVH_PROXY_CONN_STRING" // - kvalobs: "KVALOBS_CONN_STRING", "HISTKVALOBS_CONN_STRING" // // 2. Import - // - kdvh: "LARD_CONN_STRING", "STINFO_CONN_STRING", "KDVH_CONN_STRING" + // - kdvh: "LARD_CONN_STRING", "STINFO_CONN_STRING", "KDVH_PROXY_CONN_STRING" // - kvalobs: "LARD_CONN_STRING", "STINFO_CONN_STRING", "KVALOBS_CONN_STRING" - // - // NOTE: KDVH_CONN_STRING refers to the proxy err := godotenv.Load() if err != nil { fmt.Println(err) From 7a0d98656fd48b1fcbde0443dd4df9f87a743949 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 10:05:29 +0100 Subject: [PATCH 46/67] Drop indices when importing from Kvalobs --- migrations/kdvh/import/main.go | 35 ++------------------------- migrations/kvalobs/import/main.go | 17 +++++++++++++ migrations/utils/indices.go | 40 +++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 33 deletions(-) create mode 100644 migrations/utils/indices.go diff --git a/migrations/kdvh/import/main.go b/migrations/kdvh/import/main.go index 941ffcb1..2be51595 100644 --- a/migrations/kdvh/import/main.go +++ b/migrations/kdvh/import/main.go @@ -51,14 +51,14 @@ func (config *Config) Execute() { defer pool.Close() if config.Reindex { - dropIndices(pool) + utils.DropIndices(pool) } // Recreate indices even in case the main function panics defer func() { r := recover() if config.Reindex { - createIndices(pool) + utils.CreateIndices(pool) } if r != nil { @@ -85,34 +85,3 @@ func (config *Config) Execute() { log.SetOutput(os.Stdout) slog.Info("Import complete!") } - -func dropIndices(pool *pgxpool.Pool) { - slog.Info("Dropping table indices...") - - file, err := os.ReadFile("../db/drop_indices.sql") - if err != nil { - panic(err.Error()) - } - - _, err = pool.Exec(context.Background(), string(file)) - if err != nil { - panic(err.Error()) - } -} - -func createIndices(pool *pgxpool.Pool) { - slog.Info("Recreating table indices...") - - files := []string{"../db/public.sql", "../db/flags.sql"} - for _, filename := range files { - file, err := os.ReadFile(filename) - if err != nil { - panic(err.Error()) - } - - _, err = pool.Exec(context.Background(), string(file)) - if err != nil { - panic(err.Error()) - } - } -} diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index e37e441a..f7ce2fa2 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -16,6 +16,7 @@ import ( type Config struct { db.BaseConfig + Reindex bool `help:"Drop PG indices before insertion. 
Might improve performance"` } func (config *Config) Execute() error { @@ -28,6 +29,22 @@ func (config *Config) Execute() error { } defer pool.Close() + if config.Reindex { + utils.DropIndices(pool) + } + + // Recreate indices even in case the main function panics + defer func() { + r := recover() + if config.Reindex { + utils.CreateIndices(pool) + } + + if r != nil { + panic(r) + } + }() + if utils.IsEmptyOrEqual(config.Database, kvalobs.Name) { ImportDB(kvalobs, cache, pool, config) } diff --git a/migrations/utils/indices.go b/migrations/utils/indices.go new file mode 100644 index 00000000..b75ee276 --- /dev/null +++ b/migrations/utils/indices.go @@ -0,0 +1,40 @@ +package utils + +import ( + "context" + "log/slog" + "os" + + "github.com/jackc/pgx/v5/pgxpool" +) + +func DropIndices(pool *pgxpool.Pool) { + slog.Info("Dropping table indices...") + + file, err := os.ReadFile("../db/drop_indices.sql") + if err != nil { + panic(err.Error()) + } + + _, err = pool.Exec(context.Background(), string(file)) + if err != nil { + panic(err.Error()) + } +} + +func CreateIndices(pool *pgxpool.Pool) { + slog.Info("Recreating table indices...") + + files := []string{"../db/public.sql", "../db/flags.sql"} + for _, filename := range files { + file, err := os.ReadFile(filename) + if err != nil { + panic(err.Error()) + } + + _, err = pool.Exec(context.Background(), string(file)) + if err != nil { + panic(err.Error()) + } + } +} From 3f5f2e59a743b8d0d4418d80abd3e592594c91be Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 10:08:41 +0100 Subject: [PATCH 47/67] Remove unnecessary calls to log.SetOutput --- migrations/kdvh/dump/dump.go | 6 +----- migrations/kdvh/import/import.go | 6 +----- migrations/kvalobs/dump/dump.go | 6 +----- migrations/kvalobs/import/import.go | 6 +----- 4 files changed, 4 insertions(+), 20 deletions(-) diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index b1fc0c7f..ec0db49b 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -3,7 +3,6 @@ package dump import ( "context" "fmt" - "log" "log/slog" "os" "path/filepath" @@ -22,10 +21,7 @@ var INVALID_COLUMNS = []string{"dato", "stnr", "typeid", "season", "xxx"} func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { fmt.Printf("Dumping %s...\n", table.TableName) - defer func() { - fmt.Println(strings.Repeat("- ", 50)) - log.SetOutput(os.Stdout) - }() + defer fmt.Println(strings.Repeat("- ", 50)) if err := os.MkdirAll(filepath.Join(config.Path, table.Path), os.ModePerm); err != nil { slog.Error(err.Error()) diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index 30d70b84..ba850adb 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -4,7 +4,6 @@ import ( "bufio" "errors" "fmt" - "log" "log/slog" "os" "path/filepath" @@ -27,10 +26,7 @@ var INVALID_ELEMENTS = []string{"TYPEID", "TAM_NORMAL_9120", "RRA_NORMAL_9120", func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { fmt.Printf("Importing %s...\n", table.TableName) - defer func() { - fmt.Println(strings.Repeat("- ", 50)) - log.SetOutput(os.Stdout) - }() + defer fmt.Println(strings.Repeat("- ", 50)) stations, err := os.ReadDir(filepath.Join(config.Path, table.Path)) if err != nil { diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 7d36c6da..68363bcf 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -3,7 +3,6 @@ package 
dump import ( "context" "fmt" - "log" "log/slog" "os" "path/filepath" @@ -96,10 +95,7 @@ func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { utils.SetLogFile(table.Path, "dump") fmt.Printf("Dumping to %q...\n", table.Path) - defer func() { - fmt.Println(strings.Repeat("- ", 50)) - log.SetOutput(os.Stdout) - }() + defer fmt.Println(strings.Repeat("- ", 50)) labels, err := getLabels(table, pool, config) if err != nil { diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 21c3baf5..981f29eb 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -2,7 +2,6 @@ package port import ( "fmt" - "log" "log/slog" "os" "path/filepath" @@ -20,10 +19,7 @@ import ( func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { fmt.Printf("Importing from %q...\n", table.Path) - defer func() { - fmt.Println(strings.Repeat("- ", 50)) - log.SetOutput(os.Stdout) - }() + defer fmt.Println(strings.Repeat("- ", 50)) stations, err := os.ReadDir(table.Path) if err != nil { From 9500603afcd46409b96022422f5c7d183cd7ca6f Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 11:05:26 +0100 Subject: [PATCH 48/67] Simplify getStationLabelMap --- migrations/kvalobs/dump/dump.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 68363bcf..4222813a 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -81,12 +81,8 @@ func getLabels[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { labelmap := make(map[int32][]*db.Label) - var station int32 for _, label := range labels { - if station != label.StationID { - station = label.StationID - } - labelmap[station] = append(labelmap[station], label) + labelmap[label.StationID] = append(labelmap[label.StationID], label) } return labelmap From 5ec43da6283e6e3b9dd62aa691fb08a90541a647 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 12:26:52 +0100 Subject: [PATCH 49/67] Add timestamp to log filename --- migrations/utils/utils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/migrations/utils/utils.go b/migrations/utils/utils.go index a2f7c8c3..4920499a 100644 --- a/migrations/utils/utils.go +++ b/migrations/utils/utils.go @@ -8,6 +8,7 @@ import ( "slices" "strconv" "strings" + "time" "github.com/schollz/progressbar/v3" ) @@ -69,7 +70,7 @@ func SaveToFile(values []string, filename string) error { } func SetLogFile(table, procedure string) { - filename := fmt.Sprintf("%s_%s_log.txt", table, procedure) + filename := fmt.Sprintf("%s_%s_%s.log", table, procedure, time.Now().Format(time.RFC3339)) fh, err := os.Create(filename) if err != nil { slog.Error(fmt.Sprintf("Could not create log %q: %s", filename, err)) From ddc193bc72d2e3d56fb202317a55d2893004b4c9 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 13:54:58 +0100 Subject: [PATCH 50/67] Fix bugs with label dump --- migrations/kvalobs/db/labels.go | 47 ++++++++++++++++++++++++++++----- migrations/kvalobs/db/table.go | 1 - migrations/kvalobs/dump/data.go | 2 +- migrations/kvalobs/dump/text.go | 2 +- 4 files changed, 43 insertions(+), 9 deletions(-) diff --git a/migrations/kvalobs/db/labels.go 
b/migrations/kvalobs/db/labels.go index d4d2be06..8a46d17d 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -3,19 +3,23 @@ package db import ( "errors" "fmt" + "log/slog" "migrate/utils" + "os" "strconv" "strings" + + "github.com/gocarina/gocsv" ) // Kvalobs specific label type Label struct { - StationID int32 - ParamID int32 - TypeID int32 + StationID int32 `db:"stationid"` + ParamID int32 `db:"paramid"` + TypeID int32 `db:"typeid"` // These two are not present in the `text_data` tabl - Sensor *int32 // bpchar(1) in `data` table - Level *int32 + Sensor *int32 `db:"sensor"` // bpchar(1) in `data` table + Level *int32 `db:"level"` } func (l *Label) sensorLevelString() (string, string) { @@ -31,7 +35,7 @@ func (l *Label) sensorLevelString() (string, string) { func (l *Label) ToFilename() string { sensor, level := l.sensorLevelString() - return fmt.Sprintf("%v_%v_%v_%v_%v.csv", l.StationID, l.ParamID, l.ParamID, sensor, level) + return fmt.Sprintf("%v_%v_%v_%v_%v.csv", l.StationID, l.ParamID, l.TypeID, sensor, level) } func (l *Label) LogStr() string { @@ -42,6 +46,37 @@ func (l *Label) LogStr() string { ) } +func ReadLabelCSV(path string) (labels []*Label, err error) { + file, err := os.Open(path) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + defer file.Close() + + slog.Info("Reading previously dumped labels from " + path) + err = gocsv.Unmarshal(file, &labels) + if err != nil { + slog.Error(err.Error()) + } + return labels, err +} + +func WriteLabelCSV(path string, labels []*Label) error { + file, err := os.Create(path) + if err != nil { + slog.Error(err.Error()) + return err + } + + slog.Info("Writing timeseries labels to " + path) + err = gocsv.Marshal(labels, file) + if err != nil { + slog.Error(err.Error()) + } + return err +} + func parseFilenameFields(s *string) (*int32, error) { if *s == "" { return nil, nil diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go index f31a9eaa..f4bf5d8e 100644 --- a/migrations/kvalobs/db/table.go +++ b/migrations/kvalobs/db/table.go @@ -8,7 +8,6 @@ import ( // Maps to `data` and `text_data` tables in Kvalobs type Table[S DataSeries | TextSeries] struct { - // Name string // Name of the table Path string // Path of the dumped table DumpLabels LabelDumpFunc // Function that dumps labels from the table DumpSeries ObsDumpFunc[S] // Function that dumps observations from the table diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index c723e8ef..387b71af 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -38,7 +38,7 @@ func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, slog.Info("Collecting data labels...") labels := make([]*db.Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label]) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[db.Label]) if err != nil { slog.Error(err.Error()) return nil, err diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 0e3403d8..5b77bba7 100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -38,7 +38,7 @@ func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, slog.Info("Collecting text labels...") labels := make([]*db.Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByPos[db.Label]) + labels, err = 
pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[db.Label]) if err != nil { slog.Error(err.Error()) return nil, err From c41379193ecdc87ca85766af28ac9ce4a4ae019d Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 14:20:34 +0100 Subject: [PATCH 51/67] Add possibility to only dump labels --- migrations/kvalobs/dump/dump.go | 6 ++++-- migrations/kvalobs/dump/main.go | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 4222813a..2446c839 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -89,12 +89,14 @@ func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { } func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { - utils.SetLogFile(table.Path, "dump") + if !config.LabelsOnly { + utils.SetLogFile(table.Path, "dump") + } fmt.Printf("Dumping to %q...\n", table.Path) defer fmt.Println(strings.Repeat("- ", 50)) labels, err := getLabels(table, pool, config) - if err != nil { + if err != nil || config.LabelsOnly { return } diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index e2530739..4e483d10 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -11,7 +11,8 @@ import ( type Config struct { db.BaseConfig - UpdateLabels bool `help:"Overwrites the label CSV files"` + LabelsOnly bool `arg:"--labels-only" help:"Only dump labels"` + UpdateLabels bool `arg:"--labels-update" help:"Overwrites the label CSV files"` MaxConn int `arg:"-n" default:"4" help:"Max number of allowed concurrent connections to Kvalobs"` } From af535d36498f1b1458f44dc1fd1d057d63346200 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 14:21:14 +0100 Subject: [PATCH 52/67] Add 'check' package --- migrations/kdvh/dump/dump.go | 2 +- migrations/kdvh/import/import.go | 2 +- migrations/kvalobs/check/main.go | 139 ++++++++++++++++++++++++++++ migrations/kvalobs/dump/dump.go | 37 +------- migrations/kvalobs/import/import.go | 2 +- migrations/kvalobs/main.go | 8 +- 6 files changed, 151 insertions(+), 39 deletions(-) create mode 100644 migrations/kvalobs/check/main.go diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index ec0db49b..86e2f36e 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -21,7 +21,7 @@ var INVALID_COLUMNS = []string{"dato", "stnr", "typeid", "season", "xxx"} func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { fmt.Printf("Dumping %s...\n", table.TableName) - defer fmt.Println(strings.Repeat("- ", 50)) + defer fmt.Println(strings.Repeat("- ", 40)) if err := os.MkdirAll(filepath.Join(config.Path, table.Path), os.ModePerm); err != nil { slog.Error(err.Error()) diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index ba850adb..3e53d1f8 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -26,7 +26,7 @@ var INVALID_ELEMENTS = []string{"TYPEID", "TAM_NORMAL_9120", "RRA_NORMAL_9120", func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { fmt.Printf("Importing %s...\n", table.TableName) - defer fmt.Println(strings.Repeat("- ", 50)) + defer fmt.Println(strings.Repeat("- ", 40)) stations, err := os.ReadDir(filepath.Join(config.Path, table.Path)) if err != nil { diff --git a/migrations/kvalobs/check/main.go b/migrations/kvalobs/check/main.go new file mode 
100644 index 00000000..5ea2b82e --- /dev/null +++ b/migrations/kvalobs/check/main.go @@ -0,0 +1,139 @@ +package check + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "slices" + "strings" + "time" + + "github.com/jackc/pgx/v5" + + "migrate/kvalobs/db" + "migrate/lard" + "migrate/utils" +) + +type Config struct { + Path string `arg:"-p" default:"./dumps" help:"Directory of the dumped data"` + CheckName string `arg:"positional" required:"true" help:"Choices: ['overlap', 'non-scalars']"` +} + +func (c *Config) Execute() { + kvalobs, histkvalobs := db.InitDBs() + if utils.IsEmptyOrEqual(c.CheckName, "overlap") { + fmt.Println("Checking if some param IDs are stored in both the `data` and `text_data` tables") + c.checkDataAndTextParamsOverlap(&kvalobs) + c.checkDataAndTextParamsOverlap(&histkvalobs) + } + if utils.IsEmptyOrEqual(c.CheckName, "non-scalars") { + fmt.Println("Checking if param IDs in `text_data` match non-scalar parameters in Stinfosys") + stinfoParams := getStinfoNonScalars() + c.checkNonScalars(&kvalobs, stinfoParams) + c.checkNonScalars(&histkvalobs, stinfoParams) + } +} + +// Simply checks if some params are found both in the data and text_data +func (c *Config) checkDataAndTextParamsOverlap(database *db.DB) { + defer fmt.Println(strings.Repeat("- ", 40)) + datapath := filepath.Join(c.Path, database.Name, db.DATA_TABLE_NAME+"_labels.csv") + textpath := filepath.Join(c.Path, database.Name, db.TEXT_TABLE_NAME+"_labels.csv") + + dataParamids, derr := loadParamids(datapath) + textParamids, terr := loadParamids(textpath) + if derr != nil || terr != nil { + return + } + + ids := make([]int32, 0, len(textParamids)) + for id := range dataParamids { + if _, ok := textParamids[id]; ok { + ids = append(ids, id) + } + } + + slices.Sort(ids) + for _, id := range ids { + fmt.Printf("ParamID %5d exists in both data and text tables\n", id) + } +} + +func loadParamids(path string) (map[int32]int32, error) { + labels, err := db.ReadLabelCSV(path) + if err != nil { + log.Println(err) + return nil, err + } + paramids := uniqueParamids(labels) + return paramids, nil + +} + +// Creates hashset of paramids +func uniqueParamids(labels []*db.Label) map[int32]int32 { + paramids := make(map[int32]int32) + for _, label := range labels { + paramids[label.ParamID] += 1 + } + return paramids +} + +type StinfoPair struct { + ParamID int32 `db:"paramid"` + IsScalar bool `db:"scalar"` +} + +func getStinfoNonScalars() []int32 { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + conn, err := pgx.Connect(ctx, os.Getenv(lard.STINFO_ENV_VAR)) + if err != nil { + log.Fatal("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) + } + defer conn.Close(ctx) + + rows, err := conn.Query(context.TODO(), "SELECT paramid FROM param WHERE scalar = false ORDER BY paramid") + if err != nil { + log.Fatal(err) + } + nonscalars, err := pgx.CollectRows(rows, pgx.RowTo[int32]) + if err != nil { + log.Fatal(err) + } + return nonscalars +} + +// Checks that text params in Kvalobs are considered non-scalar in Stinfosys +func (c *Config) checkNonScalars(database *db.DB, nonscalars []int32) { + defer fmt.Println(strings.Repeat("- ", 40)) + path := filepath.Join(c.Path, database.Name, db.TEXT_TABLE_NAME+"_labels.csv") + kvParamids, err := loadParamids(path) + if err != nil { + return + } + + for _, id := range nonscalars { + if _, ok := kvParamids[id]; ok { + fmt.Printf("MATCH: ParamID %5d is text in both Stinfosys and Kvalobs\n", id) + delete(kvParamids, id) + } else { + fmt.Printf(" FAIL: ParamID %5d is text in Stinfosys, but not in Kvalobs\n", id) + } + } + + idsLeft := make([]int32, 0, len(kvParamids)) + for id := range kvParamids { + idsLeft = append(idsLeft, id) + } + + slices.Sort(idsLeft) + for _, id := range idsLeft { + fmt.Printf(" FAIL: ParamID %5d is text in Kvalobs, but not in Stinfosys\n", id) + } + +} diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 2446c839..6edc749a 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -16,37 +16,6 @@ import ( "migrate/utils" ) -func readLabelCSV(filename string) (labels []*db.Label, err error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - defer file.Close() - - slog.Info("Reading previously dumped labels...") - err = gocsv.Unmarshal(file, &labels) - if err != nil { - slog.Error(err.Error()) - } - return labels, err -} - -func writeLabelCSV(path string, labels []*db.Label) error { - file, err := os.Create(path) - if err != nil { - slog.Error(err.Error()) - return err - } - - slog.Info("Writing timeseries labels to " + path) - err = gocsv.Marshal(labels, file) - if err != nil { - slog.Error(err.Error()) - } - return err -} - func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, label *db.Label) error { filename := filepath.Join(path, label.ToFilename()) file, err := os.Create(filename) @@ -73,9 +42,9 @@ func getLabels[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool if err != nil { return nil, err } - return labels, writeLabelCSV(labelFile, labels) + return labels, db.WriteLabelCSV(labelFile, labels) } - return readLabelCSV(labelFile) + return db.ReadLabelCSV(labelFile) } func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { @@ -93,7 +62,7 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool utils.SetLogFile(table.Path, "dump") } fmt.Printf("Dumping to %q...\n", table.Path) - defer fmt.Println(strings.Repeat("- ", 50)) + defer fmt.Println(strings.Repeat("- ", 40)) labels, err := getLabels(table, pool, config) if err != nil || config.LabelsOnly { diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 981f29eb..6763cb8f 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -19,7 +19,7 @@ import ( func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { fmt.Printf("Importing from %q...\n", table.Path) - defer fmt.Println(strings.Repeat("- ", 50)) + defer fmt.Println(strings.Repeat("- ", 40)) stations, err 
:= os.ReadDir(table.Path) if err != nil { diff --git a/migrations/kvalobs/main.go b/migrations/kvalobs/main.go index ef60d60b..f8cbb053 100644 --- a/migrations/kvalobs/main.go +++ b/migrations/kvalobs/main.go @@ -6,13 +6,15 @@ import ( "github.com/alexflint/go-arg" + "migrate/kvalobs/check" "migrate/kvalobs/dump" port "migrate/kvalobs/import" ) type Cmd struct { - Dump *dump.Config `arg:"subcommand" help:"Dump tables from Kvalobs to CSV"` - Import *port.Config `arg:"subcommand" help:"Import CSV file dumped from Kvalobs"` + Dump *dump.Config `arg:"subcommand" help:"Dump tables from Kvalobs to CSV"` + Import *port.Config `arg:"subcommand" help:"Import CSV file dumped from Kvalobs"` + Check *check.Config `arg:"subcommand" help:"Performs various checks on kvalobs timeseries"` } func (c *Cmd) Execute(parser *arg.Parser) { @@ -21,6 +23,8 @@ func (c *Cmd) Execute(parser *arg.Parser) { c.Dump.Execute() case c.Import != nil: c.Import.Execute() + case c.Check != nil: + c.Check.Execute() default: fmt.Println("Error: passing a subcommand is required.") fmt.Println() From 7f66e17df6e86fc1c82dd0c9938a77733e8102b4 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 3 Dec 2024 15:03:18 +0100 Subject: [PATCH 53/67] Drop flags.old_databases in favor of flags.kvdata --- db/flags.sql | 12 ----------- migrations/kdvh/import/convert_functions.go | 24 +++++++++++++++------ migrations/kvalobs/import/data.go | 4 ++-- migrations/lard/import.go | 4 ++-- migrations/lard/main.go | 8 ++++--- 5 files changed, 27 insertions(+), 25 deletions(-) diff --git a/db/flags.sql b/db/flags.sql index 289d6aa8..0bc9348a 100644 --- a/db/flags.sql +++ b/db/flags.sql @@ -12,15 +12,3 @@ CREATE TABLE IF NOT EXISTS flags.kvdata ( ); CREATE INDEX IF NOT EXISTS kvdata_obtime_index ON flags.kvdata (obstime); CREATE INDEX IF NOT EXISTS kvdata_timeseries_index ON flags.kvdata USING HASH (timeseries); - -CREATE TABLE IF NOT EXISTS flags.old_databases ( - timeseries INT4 REFERENCES public.timeseries, - obstime TIMESTAMPTZ NOT NULL, - corrected REAL NULL, - controlinfo TEXT NULL, - useinfo TEXT NULL, - cfailed TEXT NULL, - CONSTRAINT unique_old_flags_timeseries_obstime UNIQUE (timeseries, obstime) -); -CREATE INDEX IF NOT EXISTS old_flags_obtime_index ON flags.old_databases (obstime); -CREATE INDEX IF NOT EXISTS old_flags_timeseries_index ON flags.old_databases USING HASH (timeseries); diff --git a/migrations/kdvh/import/convert_functions.go b/migrations/kdvh/import/convert_functions.go index c0dcf881..6340e0a5 100644 --- a/migrations/kdvh/import/convert_functions.go +++ b/migrations/kdvh/import/convert_functions.go @@ -92,8 +92,10 @@ func Convert(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { lard.Flag{ Id: obs.Id, Obstime: obs.obstime, - Useinfo: obs.Useinfo(), + Original: valPtr, + Corrected: valPtr, Controlinfo: &controlinfo, + Useinfo: obs.Useinfo(), }, nil } @@ -141,8 +143,10 @@ func ConvertEdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { lard.Flag{ Id: obs.Id, Obstime: obs.obstime, - Useinfo: obs.Useinfo(), + Original: valPtr, + Corrected: valPtr, Controlinfo: &controlinfo, + Useinfo: obs.Useinfo(), }, nil } @@ -190,8 +194,10 @@ func ConvertPdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { lard.Flag{ Id: obs.Id, Obstime: obs.obstime, - Useinfo: obs.Useinfo(), + Original: valPtr, + Corrected: valPtr, Controlinfo: &controlinfo, + Useinfo: obs.Useinfo(), }, nil } @@ -241,8 +247,10 @@ func ConvertNdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { lard.Flag{ Id: obs.Id, 
Obstime: obs.obstime, - Useinfo: obs.Useinfo(), + Original: valPtr, + Corrected: valPtr, Controlinfo: &controlinfo, + Useinfo: obs.Useinfo(), }, nil } @@ -295,6 +303,8 @@ func ConvertVdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { lard.Flag{ Id: obs.Id, Obstime: obs.obstime, + Original: valPtr, + Corrected: valPtr, Useinfo: &useinfo, Controlinfo: &controlinfo, }, nil @@ -305,11 +315,11 @@ func ConvertDiurnalInterpolated(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.F if err != nil { return lard.DataObs{}, lard.TextObs{}, lard.Flag{}, err } - + valPtr := addr(float32(val)) return lard.DataObs{ Id: obs.Id, Obstime: obs.obstime, - Data: addr(float32(val)), + Data: valPtr, }, lard.TextObs{ Id: obs.Id, @@ -319,6 +329,8 @@ func ConvertDiurnalInterpolated(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.F lard.Flag{ Id: obs.Id, Obstime: obs.obstime, + Original: valPtr, + Corrected: valPtr, Useinfo: addr(DIURNAL_INTERPOLATED_USEINFO), Controlinfo: addr(VALUE_MANUALLY_INTERPOLATED), }, nil diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go index 45b27b94..79bf4093 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/import/data.go @@ -80,7 +80,7 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { lardObs := lard.DataObs{ Id: tsid, Obstime: obstime, - Data: correctedPtr, + Data: originalPtr, } var cfailed *string @@ -88,11 +88,11 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { cfailed = &fields[6] } - // Original value is saved in flag table flag := lard.Flag{ Id: tsid, Obstime: obstime, Original: originalPtr, + Corrected: correctedPtr, Controlinfo: &fields[4], // Never null Useinfo: &fields[5], // Never null Cfailed: cfailed, diff --git a/migrations/lard/import.go b/migrations/lard/import.go index 9729f95e..3617b5ea 100644 --- a/migrations/lard/import.go +++ b/migrations/lard/import.go @@ -55,8 +55,8 @@ func InsertFlags(ts [][]any, pool *pgxpool.Pool, logStr string) error { size := len(ts) count, err := pool.CopyFrom( context.TODO(), - pgx.Identifier{"flags", "old_databases"}, - []string{"timeseries", "obstime", "corrected", "controlinfo", "useinfo", "cfailed"}, + pgx.Identifier{"flags", "kvdata"}, + []string{"timeseries", "obstime", "original", "corrected", "controlinfo", "useinfo", "cfailed"}, pgx.CopyFromRows(ts), ) if err != nil { diff --git a/migrations/lard/main.go b/migrations/lard/main.go index 82e2bbce..b3307e07 100644 --- a/migrations/lard/main.go +++ b/migrations/lard/main.go @@ -32,14 +32,16 @@ func (o *TextObs) ToRow() []any { return []any{o.Id, o.Obstime, o.Text} } -// Struct mimicking the `flags.old_databases` table +// Struct mimicking the `flags.kvdata` table type Flag struct { // Timeseries ID Id int32 // Time of observation Obstime time.Time - // Original value after QC tests + // Original value before QC tests Original *float32 + // Corrected value after QC tests + Corrected *float32 // Flag encoding quality control status Controlinfo *string // Flag encoding quality control status @@ -50,5 +52,5 @@ type Flag struct { func (o *Flag) ToRow() []any { // "timeseries", "obstime", "corrected","controlinfo", "useinfo", "cfailed" - return []any{o.Id, o.Obstime, o.Original, o.Controlinfo, o.Useinfo, o.Cfailed} + return []any{o.Id, o.Obstime, o.Original, o.Corrected, o.Controlinfo, o.Useinfo, o.Cfailed} } From b1cc685e091532ff7de5f73529d43776d951dee4 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 4 Dec 2024 17:14:59 +0100 Subject: [PATCH 54/67] Rework and 
simplify overall structure and add edge cases --- migrations/kdvh/import/cache/kdvh.go | 13 ++- migrations/kdvh/import/cache/main.go | 61 +++++----- migrations/kdvh/import/cache/offsets.go | 5 +- migrations/kdvh/import/cache/stinfosys.go | 96 ---------------- migrations/kdvh/import/import.go | 10 +- migrations/kdvh/import/main.go | 8 +- migrations/kvalobs/check/main.go | 25 ++-- migrations/kvalobs/db/labels.go | 23 +++- migrations/kvalobs/db/main.go | 9 +- migrations/kvalobs/db/table.go | 22 ++-- migrations/kvalobs/dump/data.go | 26 ++--- migrations/kvalobs/dump/dump.go | 33 +++--- migrations/kvalobs/dump/main.go | 1 + migrations/kvalobs/dump/text.go | 14 +-- migrations/kvalobs/import/cache/main.go | 21 ++-- migrations/kvalobs/import/data.go | 107 ++++++++++++++---- migrations/kvalobs/import/import.go | 39 +++---- migrations/kvalobs/import/main.go | 25 ++-- migrations/kvalobs/import/text.go | 104 +++++++++++++---- migrations/stinfosys/elem_map.go | 73 ++++++++++++ migrations/stinfosys/main.go | 23 ++++ migrations/stinfosys/non_scalars.go | 44 +++++++ migrations/{lard => stinfosys}/permissions.go | 22 +--- migrations/stinfosys/timeseries.go | 45 ++++++++ migrations/tests/kdvh_test.go | 8 +- migrations/tests/kvalobs_test.go | 85 +++++--------- 26 files changed, 570 insertions(+), 372 deletions(-) delete mode 100644 migrations/kdvh/import/cache/stinfosys.go create mode 100644 migrations/stinfosys/elem_map.go create mode 100644 migrations/stinfosys/main.go create mode 100644 migrations/stinfosys/non_scalars.go rename migrations/{lard => stinfosys}/permissions.go (77%) create mode 100644 migrations/stinfosys/timeseries.go diff --git a/migrations/kdvh/import/cache/kdvh.go b/migrations/kdvh/import/cache/kdvh.go index 7bc8b8b9..d756c650 100644 --- a/migrations/kdvh/import/cache/kdvh.go +++ b/migrations/kdvh/import/cache/kdvh.go @@ -10,7 +10,8 @@ import ( "github.com/jackc/pgx/v5" - "migrate/kdvh/db" + kdvh "migrate/kdvh/db" + "migrate/stinfosys" "migrate/utils" ) @@ -19,30 +20,30 @@ type KDVHMap = map[KDVHKey]utils.TimeSpan // Used for lookup of fromtime and totime from KDVH type KDVHKey struct { - Inner StinfoKey + Inner stinfosys.Key Station int32 } func newKDVHKey(elem, table string, stnr int32) KDVHKey { - return KDVHKey{StinfoKey{ElemCode: elem, TableName: table}, stnr} + return KDVHKey{stinfosys.Key{ElemCode: elem, TableName: table}, stnr} } // Cache timeseries timespan from KDVH -func cacheKDVH(tables, stations, elements []string, kdvh *db.KDVH) KDVHMap { +func cacheKDVH(tables, stations, elements []string, database *kdvh.KDVH) KDVHMap { cache := make(KDVHMap) slog.Info("Connecting to KDVH proxy to cache metadata") ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - conn, err := pgx.Connect(ctx, os.Getenv(db.KDVH_ENV_VAR)) + conn, err := pgx.Connect(ctx, os.Getenv(kdvh.KDVH_ENV_VAR)) if err != nil { slog.Error("Could not connect to KDVH proxy. 
Make sure to be connected to the VPN: " + err.Error()) os.Exit(1) } defer conn.Close(context.TODO()) - for _, t := range kdvh.Tables { + for _, t := range database.Tables { if len(tables) > 0 && !slices.Contains(tables, t.TableName) { continue } diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index e0e35341..81767a4c 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -8,46 +8,49 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/rickb777/period" - "migrate/kdvh/db" + kdvh "migrate/kdvh/db" "migrate/lard" + "migrate/stinfosys" "migrate/utils" ) type Cache struct { - Offsets OffsetMap - Stinfo StinfoMap - KDVH KDVHMap - Permits lard.PermitMaps + Offsets OffsetMap + Timespans KDVHMap + Elements stinfosys.ElemMap + Permits stinfosys.PermitMaps } // Caches all the metadata needed for import of KDVH tables. // If any error occurs inside here the program will exit. -func CacheMetadata(tables, stations, elements []string, kdvh *db.KDVH) *Cache { +func CacheMetadata(tables, stations, elements []string, database *kdvh.KDVH) *Cache { + stconn, ctx := stinfosys.Connect() + defer stconn.Close(ctx) + return &Cache{ - Stinfo: cacheStinfoMeta(tables, elements, kdvh), - Permits: lard.NewPermitTables(), - Offsets: cacheParamOffsets(), - KDVH: cacheKDVH(tables, stations, elements, kdvh), + Elements: stinfosys.CacheElemMap(stconn), + Permits: stinfosys.NewPermitTables(stconn), + Offsets: cacheParamOffsets(), + Timespans: cacheKDVH(tables, stations, elements, database), } } // Convenience struct that holds information for a specific timeseries type TsInfo struct { - Id int32 - Station int32 - Element string - Offset period.Period - Param StinfoParam - Span utils.TimeSpan - Logstr string - IsOpen bool + Id int32 + Station int32 + Element string + Offset period.Period + Param stinfosys.Param + Timespan utils.TimeSpan + Logstr string } func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpool.Pool) (*TsInfo, error) { logstr := fmt.Sprintf("[%v - %v - %v]: ", table, station, element) key := newKDVHKey(element, table, station) - param, ok := cache.Stinfo[key.Inner] + param, ok := cache.Elements[key.Inner] if !ok { // TODO: should it fail here? How do we deal with data without metadata? slog.Error(logstr + "Missing metadata in Stinfosys") @@ -66,7 +69,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo offset := cache.Offsets[key.Inner] // No need to check for `!ok`, timespan will be ignored if not in the map - span, ok := cache.KDVH[key] + timespan, ok := cache.Timespans[key] label := lard.Label{ StationID: station, @@ -77,21 +80,19 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo } // TODO: are Param.Fromtime and Span.From different? 
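+ // The timespan passed below combines the two sources: the lower bound comes from
+ // Stinfosys (param.Fromtime), while the upper bound comes from the cached KDVH
+ // timespan (timespan.To).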
- timespan := utils.TimeSpan{From: ¶m.Fromtime, To: span.To} - tsid, err := lard.GetTimeseriesID(&label, timespan, pool) + tsid, err := lard.GetTimeseriesID(&label, utils.TimeSpan{From: ¶m.Fromtime, To: timespan.To}, pool) if err != nil { slog.Error(logstr + "could not obtain timeseries - " + err.Error()) return nil, err } return &TsInfo{ - Id: tsid, - Station: station, - Element: element, - Offset: offset, - Param: param, - Span: span, - Logstr: logstr, - IsOpen: isOpen, + Id: tsid, + Station: station, + Element: element, + Offset: offset, + Param: param, + Timespan: timespan, + Logstr: logstr, }, nil } diff --git a/migrations/kdvh/import/cache/offsets.go b/migrations/kdvh/import/cache/offsets.go index e39a934b..e51c490b 100644 --- a/migrations/kdvh/import/cache/offsets.go +++ b/migrations/kdvh/import/cache/offsets.go @@ -2,6 +2,7 @@ package cache import ( "log/slog" + "migrate/stinfosys" "os" "github.com/gocarina/gocsv" @@ -9,7 +10,7 @@ import ( ) // Map of offsets used to correct KDVH times for specific parameters -type OffsetMap = map[StinfoKey]period.Period +type OffsetMap = map[stinfosys.Key]period.Period // Caches how to modify the obstime (in KDVH) for certain paramids func cacheParamOffsets() OffsetMap { @@ -58,7 +59,7 @@ func cacheParamOffsets() OffsetMap { os.Exit(1) } - cache[StinfoKey{ElemCode: row.ElemCode, TableName: row.TableName}] = migrationOffset + cache[stinfosys.Key{ElemCode: row.ElemCode, TableName: row.TableName}] = migrationOffset } return cache diff --git a/migrations/kdvh/import/cache/stinfosys.go b/migrations/kdvh/import/cache/stinfosys.go deleted file mode 100644 index 64c8d09c..00000000 --- a/migrations/kdvh/import/cache/stinfosys.go +++ /dev/null @@ -1,96 +0,0 @@ -package cache - -import ( - "context" - "log/slog" - "os" - "slices" - "time" - - "github.com/jackc/pgx/v5" - - "migrate/kdvh/db" - "migrate/lard" -) - -// Map of metadata used to query timeseries ID in LARD -type StinfoMap = map[StinfoKey]StinfoParam - -// StinfoKey is used for lookup of parameter offsets and metadata from Stinfosys -type StinfoKey struct { - ElemCode string - TableName string -} - -// Subset of elem_map_cfnames_param query with only param info -type StinfoParam struct { - TypeID int32 - ParamID int32 - Hlevel *int32 - Sensor int32 - Fromtime time.Time - IsScalar bool -} - -// Save metadata for later use by quering Stinfosys -func cacheStinfoMeta(tables, elements []string, kdvh *db.KDVH) StinfoMap { - cache := make(StinfoMap) - - slog.Info("Connecting to Stinfosys to cache metadata") - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - conn, err := pgx.Connect(ctx, os.Getenv(lard.STINFO_ENV_VAR)) - if err != nil { - slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) - os.Exit(1) - } - defer conn.Close(ctx) - - for _, table := range kdvh.Tables { - if len(tables) > 0 && !slices.Contains(tables, table.TableName) { - continue - } - - // select paramid, elem_code, scalar from elem_map_cfnames_param join param using(paramid) where scalar = false - query := `SELECT elem_code, table_name, typeid, paramid, hlevel, sensor, fromtime, scalar - FROM elem_map_cfnames_param - JOIN param USING(paramid) - WHERE table_name = $1 - AND ($2::text[] = '{}' OR elem_code = ANY($2))` - - rows, err := conn.Query(context.TODO(), query, table.TableName, elements) - if err != nil { - slog.Error(err.Error()) - os.Exit(1) - } - - for rows.Next() { - var key StinfoKey - var param StinfoParam - err := rows.Scan( - &key.ElemCode, - &key.TableName, - ¶m.TypeID, - ¶m.ParamID, - ¶m.Hlevel, - ¶m.Sensor, - ¶m.Fromtime, - ¶m.IsScalar, - ) - if err != nil { - slog.Error(err.Error()) - os.Exit(1) - } - - cache[key] = param - } - - if rows.Err() != nil { - slog.Error(rows.Err().Error()) - os.Exit(1) - } - } - - return cache -} diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index 3e53d1f8..72627c9b 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -15,7 +15,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "migrate/kdvh/db" + kdvh "migrate/kdvh/db" "migrate/kdvh/import/cache" "migrate/lard" "migrate/utils" @@ -24,7 +24,7 @@ import ( // TODO: add CALL_SIGN? It's not in stinfosys? var INVALID_ELEMENTS = []string{"TYPEID", "TAM_NORMAL_9120", "RRA_NORMAL_9120", "OT", "OTN", "OTX", "DD06", "DD12", "DD18"} -func ImportTable(table *db.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { +func ImportTable(table *kdvh.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (rowsInserted int64) { fmt.Printf("Importing %s...\n", table.TableName) defer fmt.Println(strings.Repeat("- ", 40)) @@ -150,7 +150,7 @@ func getElementCode(element os.DirEntry, elementList []string) (string, error) { // Parses the observations in the CSV file, converts them with the table // ConvertFunction and returns three arrays that can be passed to pgx.CopyFromRows -func parseData(filename string, tsInfo *cache.TsInfo, convFunc ConvertFunction, table *db.Table, config *Config) ([][]any, [][]any, [][]any, error) { +func parseData(filename string, tsInfo *cache.TsInfo, convFunc ConvertFunction, table *kdvh.Table, config *Config) ([][]any, [][]any, [][]any, error) { file, err := os.Open(filename) if err != nil { slog.Warn(err.Error()) @@ -180,9 +180,9 @@ func parseData(filename string, tsInfo *cache.TsInfo, convFunc ConvertFunction, } // Only import data between KDVH's defined fromtime and totime - if tsInfo.Span.From != nil && obsTime.Sub(*tsInfo.Span.From) < 0 { + if tsInfo.Timespan.From != nil && obsTime.Sub(*tsInfo.Timespan.From) < 0 { continue - } else if tsInfo.Span.To != nil && obsTime.Sub(*tsInfo.Span.To) > 0 { + } else if tsInfo.Timespan.To != nil && obsTime.Sub(*tsInfo.Timespan.To) > 0 { break } diff --git a/migrations/kdvh/import/main.go b/migrations/kdvh/import/main.go index 2be51595..223900e5 100644 --- a/migrations/kdvh/import/main.go +++ b/migrations/kdvh/import/main.go @@ -10,7 +10,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "migrate/kdvh/db" + kdvh "migrate/kdvh/db" "migrate/kdvh/import/cache" "migrate/lard" "migrate/utils" @@ -37,10 +37,10 @@ func (config *Config) Execute() { } slog.Info("Import started!") - kdvh := db.Init() + database := kdvh.Init() // Cache metadata from 
Stinfosys, KDVH, and local `product_offsets.csv` - cache := cache.CacheMetadata(config.Tables, config.Stations, config.Elements, kdvh) + cache := cache.CacheMetadata(config.Tables, config.Stations, config.Elements, database) // Create connection pool for LARD pool, err := pgxpool.New(context.TODO(), os.Getenv(lard.LARD_ENV_VAR)) @@ -66,7 +66,7 @@ func (config *Config) Execute() { } }() - for _, table := range kdvh.Tables { + for _, table := range database.Tables { if len(config.Tables) > 0 && !slices.Contains(config.Tables, table.TableName) { continue } diff --git a/migrations/kvalobs/check/main.go b/migrations/kvalobs/check/main.go index 5ea2b82e..9ca90fbc 100644 --- a/migrations/kvalobs/check/main.go +++ b/migrations/kvalobs/check/main.go @@ -13,7 +13,7 @@ import ( "github.com/jackc/pgx/v5" "migrate/kvalobs/db" - "migrate/lard" + "migrate/stinfosys" "migrate/utils" ) @@ -91,7 +91,7 @@ func getStinfoNonScalars() []int32 { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - conn, err := pgx.Connect(ctx, os.Getenv(lard.STINFO_ENV_VAR)) + conn, err := pgx.Connect(ctx, os.Getenv(stinfosys.STINFO_ENV_VAR)) if err != nil { log.Fatal("Could not connect to Stinfosys. Make sure to be connected to the VPN. " + err.Error()) } @@ -111,23 +111,28 @@ func getStinfoNonScalars() []int32 { // Checks that text params in Kvalobs are considered non-scalar in Stinfosys func (c *Config) checkNonScalars(database *db.DB, nonscalars []int32) { defer fmt.Println(strings.Repeat("- ", 40)) - path := filepath.Join(c.Path, database.Name, db.TEXT_TABLE_NAME+"_labels.csv") - kvParamids, err := loadParamids(path) - if err != nil { + datapath := filepath.Join(c.Path, database.Name, db.DATA_TABLE_NAME+"_labels.csv") + textpath := filepath.Join(c.Path, database.Name, db.TEXT_TABLE_NAME+"_labels.csv") + + dataParamids, derr := loadParamids(datapath) + textParamids, terr := loadParamids(textpath) + if derr != nil || terr != nil { return } for _, id := range nonscalars { - if _, ok := kvParamids[id]; ok { + if _, ok := textParamids[id]; ok { fmt.Printf("MATCH: ParamID %5d is text in both Stinfosys and Kvalobs\n", id) - delete(kvParamids, id) - } else { + delete(textParamids, id) + } else if _, ok := dataParamids[id]; ok { fmt.Printf(" FAIL: ParamID %5d is text in Stinfosys, but not in Kvalobs\n", id) + } else { + fmt.Printf(" WARN: ParamID %5d not found in Kvalobs\n", id) } } - idsLeft := make([]int32, 0, len(kvParamids)) - for id := range kvParamids { + idsLeft := make([]int32, 0, len(textParamids)) + for id := range textParamids { idsLeft = append(idsLeft, id) } diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/labels.go index 8a46d17d..0ddedddb 100644 --- a/migrations/kvalobs/db/labels.go +++ b/migrations/kvalobs/db/labels.go @@ -4,14 +4,19 @@ import ( "errors" "fmt" "log/slog" + "migrate/lard" "migrate/utils" "os" + "slices" "strconv" "strings" "github.com/gocarina/gocsv" ) +var METAR_CLOUD_TYPES []int32 = []int32{2751, 2752, 2753, 2754} +var SPECIAL_CLOUD_TYPES []int32 = []int32{305, 306, 307, 308} + // Kvalobs specific label type Label struct { StationID int32 `db:"stationid"` @@ -20,6 +25,15 @@ type Label struct { // These two are not present in the `text_data` tabl Sensor *int32 `db:"sensor"` // bpchar(1) in `data` table Level *int32 `db:"level"` + // LogStr string +} + +func (l *Label) IsMetarCloudType() bool { + return slices.Contains(METAR_CLOUD_TYPES, l.ParamID) +} + +func (l *Label) IsSpecialCloudType() bool { + return slices.Contains(SPECIAL_CLOUD_TYPES, 
l.ParamID) } func (l *Label) sensorLevelString() (string, string) { @@ -46,6 +60,11 @@ func (l *Label) LogStr() string { ) } +func (l *Label) ToLard() *lard.Label { + label := lard.Label(*l) + return &label +} + func ReadLabelCSV(path string) (labels []*Label, err error) { file, err := os.Open(path) if err != nil { @@ -94,8 +113,8 @@ func LabelFromFilename(filename string) (*Label, error) { name := strings.TrimSuffix(filename, ".csv") fields := strings.Split(name, "_") - if len(fields) < 5 { - return nil, errors.New("Too few fields in file name: " + filename) + if len(fields) != 5 { + return nil, errors.New("Wrong number of fields in file name: " + filename) } ptrs := make([]*string, len(fields)) diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index e7636615..50d42701 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -28,6 +28,8 @@ import ( // useinfo | character(16) | | | '0000000000000000'::bpchar // cfailed | text | | | // +// - `data_history`: stores observations similar to `data`, but not sure what history refers to +// // - `default_missing`: // - `default_missing_values`: default values for some paramids (-32767) // - `model`: stores model names @@ -89,7 +91,9 @@ import ( // tbtime | timestamp without time zone | | not null | // typeid | integer | | not null | // -// NOTE: In `histkvalobs` only `data` and `text_data` are non-empty. +// - `text_data_history`: stores observations similar to `text_data`, but not sure what history refers to +// +// NOTE: In `histkvalobs` only `data`, `data_history`, `text_data`, and `text_data_history` are non-empty. // // IMPORTANT: considerations for migrations to LARD // - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table @@ -128,11 +132,14 @@ type TextObs struct { Tbtime time.Time `db:"tbtime"` } +// Basic Metadata for a Kvalobs database type DB struct { Name string + Path string ConnEnvVar string } +// Returns two `DB` structs with metadata for the prod and hist databases func InitDBs() (DB, DB) { kvalobs := DB{Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING"} histkvalobs := DB{Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING"} diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go index f4bf5d8e..959532a2 100644 --- a/migrations/kvalobs/db/table.go +++ b/migrations/kvalobs/db/table.go @@ -7,25 +7,21 @@ import ( ) // Maps to `data` and `text_data` tables in Kvalobs -type Table[S DataSeries | TextSeries] struct { - Path string // Path of the dumped table - DumpLabels LabelDumpFunc // Function that dumps labels from the table - DumpSeries ObsDumpFunc[S] // Function that dumps observations from the table - Import ImportFunc // Function that ingests observations into LARD - ReadCSV ReadCSVFunc // Function that reads dumped CSV files +type Table struct { + Path string // Path of the dumped table + DumpLabels LabelDumpFunc // Function that dumps labels from the table + DumpSeries ObsDumpFunc // Function that dumps observations from the table + Import ImportFunc // Function that parses dumps and ingests observations into LARD } -type DataTable = Table[DataSeries] -type TextTable = Table[TextSeries] - // Function used to query labels from kvalobs given an optional timespan type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) -// Function used to query timeseries from kvalobs for a specific label -type ObsDumpFunc[S DataSeries | TextSeries] func(label *Label, timespan *utils.TimeSpan, pool 
*pgxpool.Pool) (S, error) +// Function used to query timeseries from kvalobs for a specific label and dump them inside path +type ObsDumpFunc func(label *Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error // Lard Import function -type ImportFunc func(ts [][]any, pool *pgxpool.Pool, logStr string) (int64, error) +type ImportFunc func(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) // How to read dumped CSV, returns one array for observations and one for flags -type ReadCSVFunc func(tsid int32, filename string) ([][]any, [][]any, error) +type ReadCSVFunc func(tsid int32, label *Label, filename string) ([][]any, [][]any, error) diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go index 387b71af..9e97aca1 100644 --- a/migrations/kvalobs/dump/data.go +++ b/migrations/kvalobs/dump/data.go @@ -9,20 +9,20 @@ import ( "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" "migrate/utils" ) // Returns a DataTable for dump -func DataTable(path string) db.DataTable { - return db.DataTable{ - Path: filepath.Join(path, db.DATA_TABLE_NAME), +func DataTable(path string) kvalobs.Table { + return kvalobs.Table{ + Path: filepath.Join(path, kvalobs.DATA_TABLE_NAME), DumpLabels: dumpDataLabels, DumpSeries: dumpDataSeries, } } -func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, error) { +func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*kvalobs.Label, error) { query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level FROM data WHERE ($1::timestamp IS NULL OR obstime >= $1) @@ -37,8 +37,8 @@ func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, } slog.Info("Collecting data labels...") - labels := make([]*db.Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[db.Label]) + labels := make([]*kvalobs.Label, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[kvalobs.Label]) if err != nil { slog.Error(err.Error()) return nil, err @@ -47,7 +47,7 @@ func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, return labels, nil } -func dumpDataSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.DataSeries, error) { +func dumpDataSeries(label *kvalobs.Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { // NOTE: sensor and level could be NULL, but in reality they have default values query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed FROM data @@ -79,15 +79,13 @@ func dumpDataSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Poo timespan.To, ) if err != nil { - slog.Error(err.Error()) - return nil, err + return err } - data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.DataObs]) + data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[kvalobs.DataObs]) if err != nil { - slog.Error(err.Error()) - return nil, err + return err } - return data, nil + return writeSeriesCSV(data, path, label) } diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 6edc749a..252fab41 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -12,11 +12,11 @@ import ( "github.com/gocarina/gocsv" "github.com/jackc/pgx/v5/pgxpool" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" "migrate/utils" ) -func 
writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, label *db.Label) error { +func writeSeriesCSV[S kvalobs.DataSeries | kvalobs.TextSeries](series S, path string, label *kvalobs.Label) error { filename := filepath.Join(path, label.ToFilename()) file, err := os.Create(filename) if err != nil { @@ -34,7 +34,7 @@ func writeSeriesCSV[S db.DataSeries | db.TextSeries](series S, path string, labe return nil } -func getLabels[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) (labels []*db.Label, err error) { +func getLabels(table kvalobs.Table, pool *pgxpool.Pool, config *Config) (labels []*kvalobs.Label, err error) { labelFile := table.Path + "_labels.csv" if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { @@ -42,13 +42,13 @@ func getLabels[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool if err != nil { return nil, err } - return labels, db.WriteLabelCSV(labelFile, labels) + return labels, kvalobs.WriteLabelCSV(labelFile, labels) } - return db.ReadLabelCSV(labelFile) + return kvalobs.ReadLabelCSV(labelFile) } -func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { - labelmap := make(map[int32][]*db.Label) +func getStationLabelMap(labels []*kvalobs.Label) map[int32][]*kvalobs.Label { + labelmap := make(map[int32][]*kvalobs.Label) for _, label := range labels { labelmap[label.StationID] = append(labelmap[label.StationID], label) @@ -57,7 +57,7 @@ func getStationLabelMap(labels []*db.Label) map[int32][]*db.Label { return labelmap } -func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool.Pool, config *Config) { +func dumpTable(table kvalobs.Table, pool *pgxpool.Pool, config *Config) { if !config.LabelsOnly { utils.SetLogFile(table.Path, "dump") } @@ -109,23 +109,20 @@ func dumpTable[S db.DataSeries | db.TextSeries](table db.Table[S], pool *pgxpool return } - series, err := table.DumpSeries(label, timespan, pool) - if err != nil { + logStr := label.LogStr() + if err := table.DumpSeries(label, timespan, stationPath, pool); err != nil { + slog.Info(logStr + err.Error()) return } - if err := writeSeriesCSV(series, stationPath, label); err != nil { - return - } - - slog.Info(label.LogStr() + "dumped successfully") + slog.Info(logStr + "dumped successfully") }() } wg.Wait() } } -func dumpDB(database db.DB, config *Config) { +func dumpDB(database kvalobs.DB, config *Config) { pool, err := pgxpool.New(context.Background(), os.Getenv(database.ConnEnvVar)) if err != nil { slog.Error(fmt.Sprint("Could not connect to Kvalobs:", err)) @@ -139,12 +136,12 @@ func dumpDB(database db.DB, config *Config) { return } - if utils.IsEmptyOrEqual(config.Table, db.DATA_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, kvalobs.DATA_TABLE_NAME) { table := DataTable(path) dumpTable(table, pool, config) } - if utils.IsEmptyOrEqual(config.Table, db.TEXT_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, kvalobs.TEXT_TABLE_NAME) { table := TextTable(path) dumpTable(table, pool, config) } diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 4e483d10..7d800063 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -18,6 +18,7 @@ type Config struct { func (config *Config) Execute() { kvalobs, histkvalobs := db.InitDBs() + // tables := []*db.Table{} if utils.IsEmptyOrEqual(config.Database, kvalobs.Name) { dumpDB(kvalobs, config) diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go index 5b77bba7..c2bb49a2 
100644 --- a/migrations/kvalobs/dump/text.go +++ b/migrations/kvalobs/dump/text.go @@ -13,8 +13,8 @@ import ( ) // Returns a TextTable for dump -func TextTable(path string) db.TextTable { - return db.TextTable{ +func TextTable(path string) db.Table { + return db.Table{ Path: filepath.Join(path, db.TEXT_TABLE_NAME), DumpLabels: dumpTextLabels, DumpSeries: dumpTextSeries, @@ -47,7 +47,7 @@ func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, return labels, nil } -func dumpTextSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Pool) (db.TextSeries, error) { +func dumpTextSeries(label *db.Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { query := `SELECT obstime, original, tbtime FROM text_data WHERE stationid = $1 AND typeid = $2 @@ -66,15 +66,13 @@ func dumpTextSeries(label *db.Label, timespan *utils.TimeSpan, pool *pgxpool.Poo timespan.To, ) if err != nil { - slog.Error(err.Error()) - return nil, err + return err } data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.TextObs]) if err != nil { - slog.Error(err.Error()) - return nil, err + return err } - return data, nil + return writeSeriesCSV(data, path, label) } diff --git a/migrations/kvalobs/import/cache/main.go b/migrations/kvalobs/import/cache/main.go index bc368006..cc3c4a7c 100644 --- a/migrations/kvalobs/import/cache/main.go +++ b/migrations/kvalobs/import/cache/main.go @@ -10,19 +10,26 @@ import ( "github.com/jackc/pgx/v5" "migrate/kvalobs/db" - "migrate/lard" + "migrate/stinfosys" "migrate/utils" ) -type KvalobsTimespan = map[MetaKey]utils.TimeSpan +type KvalobsTimespanMap = map[MetaKey]utils.TimeSpan type Cache struct { - Meta KvalobsTimespan - Permits lard.PermitMaps + Meta KvalobsTimespanMap + Permits stinfosys.PermitMaps + // Params stinfosys.ScalarMap // Don't need them } func New(kvalobs db.DB) *Cache { - permits := lard.NewPermitTables() + conn, ctx := stinfosys.Connect() + defer conn.Close(ctx) + + permits := stinfosys.NewPermitTables(conn) + // params := stinfosys.GetParamScalarMap(conn) + // timeseries := + timespans := cacheKvalobsTimeseriesTimespans(kvalobs) return &Cache{Permits: permits, Meta: timespans} } @@ -60,8 +67,8 @@ type MetaKey struct { } // Query kvalobs `station_metadata` table that stores timeseries timespans -func cacheKvalobsTimeseriesTimespans(kvalobs db.DB) KvalobsTimespan { - cache := make(KvalobsTimespan) +func cacheKvalobsTimeseriesTimespans(kvalobs db.DB) KvalobsTimespanMap { + cache := make(KvalobsTimespanMap) slog.Info("Connecting to Kvalobs to cache metadata") ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/import/data.go index 79bf4093..75f44ad2 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/import/data.go @@ -10,45 +10,76 @@ import ( "strings" "time" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" "migrate/lard" + + "github.com/jackc/pgx/v5/pgxpool" ) // Returns a DataTable for import -func DataTable(path string) db.DataTable { - return db.DataTable{ - Path: filepath.Join(path, db.DATA_TABLE_NAME), - Import: lard.InsertData, - ReadCSV: ReadDataCSV, +func DataTable(path string) kvalobs.Table { + return kvalobs.Table{ + Path: filepath.Join(path, kvalobs.DATA_TABLE_NAME), + Import: importData, } } -func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { +func importData(tsid int32, label *kvalobs.Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { file, err := 
os.Open(filename) if err != nil { - slog.Error(err.Error()) - return nil, nil, err + slog.Error(logStr + err.Error()) + return 0, err } defer file.Close() - reader := bufio.NewScanner(file) + scanner := bufio.NewScanner(file) // Parse number of rows - reader.Scan() - rowCount, _ := strconv.Atoi(reader.Text()) + scanner.Scan() + rowCount, _ := strconv.Atoi(scanner.Text()) // Skip header - reader.Scan() + scanner.Scan() - var originalPtr, correctedPtr *float32 + if label.IsSpecialCloudType() { + text, err := parseSpecialCloudType(tsid, rowCount, scanner) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + count, err := lard.InsertTextData(text, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil + } + + data, flags, err := parseDataCSV(tsid, rowCount, scanner) + count, err := lard.InsertData(data, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + if err := lard.InsertFlags(flags, pool, logStr); err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil +} - // Parse observations +func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [][]any, error) { data := make([][]any, 0, rowCount) flags := make([][]any, 0, rowCount) - for reader.Scan() { + var originalPtr, correctedPtr *float32 + for scanner.Scan() { // obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed // We don't parse tbtime - fields := strings.Split(reader.Text(), ",") + fields := strings.Split(scanner.Text(), ",") obstime, err := time.Parse(time.RFC3339, fields[0]) if err != nil { @@ -65,18 +96,18 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { return nil, nil, err } - // Filter out special values that in Kvalobs stand for null observations original := float32(obsvalue64) - if !slices.Contains(db.NULL_VALUES, original) { + corrected := float32(corrected64) + + // Filter out special values that in Kvalobs stand for null observations + if !slices.Contains(kvalobs.NULL_VALUES, original) { originalPtr = &original } - - corrected := float32(corrected64) - if !slices.Contains(db.NULL_VALUES, corrected) { + if !slices.Contains(kvalobs.NULL_VALUES, corrected) { correctedPtr = &corrected } - // Corrected value is inserted in main data table + // Original value is inserted in main data table lardObs := lard.DataObs{ Id: tsid, Obstime: obstime, @@ -93,8 +124,8 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { Obstime: obstime, Original: originalPtr, Corrected: correctedPtr, - Controlinfo: &fields[4], // Never null - Useinfo: &fields[5], // Never null + Controlinfo: &fields[4], // Never null, has default values in KValobs + Useinfo: &fields[5], // Never null, has default values in KValobs Cfailed: cfailed, } @@ -104,3 +135,29 @@ func ReadDataCSV(tsid int32, filename string) ([][]any, [][]any, error) { return data, flags, nil } + +// Function for paramids 305, 306, 307, 308 that were stored as scalar data +// but should be treated as text +func parseSpecialCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { + data := make([][]any, 0, rowCount) + for scanner.Scan() { + // obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed + // TODO: should parse everything and return the flags? 
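+ // For now only obstime (fields[0]) and the original value (fields[1]) are used,
+ // so these special cloud types are inserted as text rows without flags.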
+ fields := strings.Split(scanner.Text(), ",") + + obstime, err := time.Parse(time.RFC3339, fields[0]) + if err != nil { + return nil, err + } + + lardObs := lard.TextObs{ + Id: tsid, + Obstime: obstime, + Text: &fields[1], + } + + data = append(data, lardObs.ToRow()) + } + + return data, nil +} diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 6763cb8f..4fa0d80e 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -11,13 +11,13 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" "migrate/kvalobs/import/cache" "migrate/lard" "migrate/utils" ) -func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { +func ImportTable(table kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { fmt.Printf("Importing from %q...\n", table.Path) defer fmt.Println(strings.Repeat("- ", 40)) @@ -53,7 +53,7 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach wg.Done() }() - label, err := db.LabelFromFilename(file.Name()) + label, err := kvalobs.LabelFromFilename(file.Name()) if err != nil { slog.Error(err.Error()) return @@ -63,45 +63,34 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach return } - labelStr := label.LogStr() - + logStr := label.LogStr() // Check if data for this station/element is restricted if !cache.TimeseriesIsOpen(label.StationID, label.TypeID, label.ParamID) { // TODO: eventually use this to choose which table to use on insert - slog.Warn(labelStr + "timeseries data is restricted, skipping") + slog.Warn(logStr + "timeseries data is restricted, skipping") return } timespan, err := cache.GetSeriesTimespan(label) if err != nil { - slog.Error(labelStr + err.Error()) + slog.Error(logStr + err.Error()) return } - lardLabel := lard.Label(*label) // TODO: figure out where to get fromtime, kvalobs directly? Stinfosys? 
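+ // The timespan used here comes from Kvalobs' own `station_metadata` cache
+ // (GetSeriesTimespan above), not from Stinfosys.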
- tsid, err := lard.GetTimeseriesID(&lardLabel, timespan, pool) + tsid, err := lard.GetTimeseriesID(label.ToLard(), timespan, pool) if err != nil { - slog.Error(labelStr + err.Error()) + slog.Error(logStr + err.Error()) return } - ts, flags, err := table.ReadCSV(tsid, filepath.Join(stationDir, file.Name())) + filename := filepath.Join(stationDir, file.Name()) + count, err := table.Import(tsid, label, filename, logStr, pool) if err != nil { - slog.Error(labelStr + err.Error()) + // Logged inside table.Import return } - count, err := table.Import(ts, pool, labelStr) - if err != nil { - slog.Error(labelStr + "Failed bulk insertion - " + err.Error()) - return - } - - if err := lard.InsertFlags(flags, pool, labelStr); err != nil { - slog.Error(labelStr + "failed flag bulk insertion - " + err.Error()) - } - rowsInserted += count }() } @@ -117,17 +106,17 @@ func ImportTable[S db.DataSeries | db.TextSeries](table db.Table[S], cache *cach // TODO: while importing we trust that kvalobs and stinfosys have the same // non scalar parameters, which might not be the case -func ImportDB(database db.DB, cache *cache.Cache, pool *pgxpool.Pool, config *Config) { +func ImportDB(database kvalobs.DB, cache *cache.Cache, pool *pgxpool.Pool, config *Config) { path := filepath.Join(config.Path, database.Name) - if utils.IsEmptyOrEqual(config.Table, db.DATA_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, kvalobs.DATA_TABLE_NAME) { table := DataTable(path) utils.SetLogFile(table.Path, "import") ImportTable(table, cache, pool, config) } - if utils.IsEmptyOrEqual(config.Table, db.TEXT_TABLE_NAME) { + if utils.IsEmptyOrEqual(config.Table, kvalobs.TEXT_TABLE_NAME) { table := TextTable(path) utils.SetLogFile(table.Path, "import") diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index f7ce2fa2..b3a4bf07 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -8,20 +8,29 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" "migrate/kvalobs/import/cache" "migrate/lard" "migrate/utils" ) +// NOTE: +// - for both kvalobs and histkvalobs: +// - all stinfo non-scalar params that can be found in Kvalobs are stored in `text_data` +// - 305, 306, 307, 308 are also in `data` but should be treated as `text_data` +// => should always use readDataCSV and lard.InsertData for these +// - only for histkvalobs +// - 2751, 2752, 2753, 2754 are in `text_data` but should be treated as `data`? +// => These are more complicated, but probably we should + type Config struct { - db.BaseConfig + kvalobs.BaseConfig Reindex bool `help:"Drop PG indices before insertion. 
Might improve performance"` } func (config *Config) Execute() error { - kvalobs, histkvalobs := db.InitDBs() - cache := cache.New(kvalobs) + prod, hist := kvalobs.InitDBs() + cache := cache.New(prod) pool, err := pgxpool.New(context.Background(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { @@ -45,12 +54,12 @@ func (config *Config) Execute() error { } }() - if utils.IsEmptyOrEqual(config.Database, kvalobs.Name) { - ImportDB(kvalobs, cache, pool, config) + if utils.IsEmptyOrEqual(config.Database, prod.Name) { + ImportDB(prod, cache, pool, config) } - if utils.IsEmptyOrEqual(config.Database, histkvalobs.Name) { - ImportDB(histkvalobs, cache, pool, config) + if utils.IsEmptyOrEqual(config.Database, hist.Name) { + ImportDB(hist, cache, pool, config) } return nil diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go index f3db8f99..75312db2 100644 --- a/migrations/kvalobs/import/text.go +++ b/migrations/kvalobs/import/text.go @@ -9,45 +9,77 @@ import ( "strings" "time" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" "migrate/lard" + + "github.com/jackc/pgx/v5/pgxpool" ) // Returns a TextTable for import -func TextTable(path string) db.TextTable { - return db.TextTable{ - Path: filepath.Join(path, db.TEXT_TABLE_NAME), - Import: lard.InsertTextData, - ReadCSV: ReadTextCSV, +func TextTable(path string) kvalobs.Table { + return kvalobs.Table{ + Path: filepath.Join(path, kvalobs.TEXT_TABLE_NAME), + Import: importText, } } -func ReadTextCSV(tsid int32, filename string) ([][]any, [][]any, error) { +func importText(tsid int32, label *kvalobs.Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { file, err := os.Open(filename) if err != nil { - slog.Error(err.Error()) - return nil, nil, err + slog.Error(logStr + err.Error()) + return 0, err } defer file.Close() - reader := bufio.NewScanner(file) + scanner := bufio.NewScanner(file) // Parse number of rows - reader.Scan() - rowCount, _ := strconv.Atoi(reader.Text()) + scanner.Scan() + rowCount, _ := strconv.Atoi(scanner.Text()) // Skip header - reader.Scan() + scanner.Scan() + + if label.IsMetarCloudType() { + data, err := parseMetarCloudType(tsid, rowCount, scanner) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + count, err := lard.InsertData(data, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil + } + + text, err := parseTextCSV(tsid, rowCount, scanner) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + count, err := lard.InsertTextData(text, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } - // Parse observations + return count, nil +} + +// Text obs are not flagged +func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { data := make([][]any, 0, rowCount) - for reader.Scan() { + for scanner.Scan() { // obstime, original, tbtime - fields := strings.Split(reader.Text(), ",") + fields := strings.Split(scanner.Text(), ",") obstime, err := time.Parse(time.RFC3339, fields[0]) if err != nil { - return nil, nil, err + return nil, err } lardObs := lard.TextObs{ @@ -59,6 +91,40 @@ func ReadTextCSV(tsid int32, filename string) ([][]any, [][]any, error) { data = append(data, lardObs.ToRow()) } - // Text obs are not flagged - return data, nil, nil + return data, nil +} + +// Function for paramids 2751, 2752, 2753, 2754 that were stored as text data +// but should instead be treated as scalars +// TODO: I'm not sure these params should 
be scalars given that the other cloud types are not. +// Should all cloud types be integers? +func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { + data := make([][]any, 0, rowCount) + for scanner.Scan() { + // obstime, original, tbtime + fields := strings.Split(scanner.Text(), ",") + + obstime, err := time.Parse(time.RFC3339, fields[0]) + if err != nil { + return nil, err + } + + val, err := strconv.ParseFloat(fields[1], 32) + if err != nil { + return nil, err + } + + original := float32(val) + lardObs := lard.DataObs{ + Id: tsid, + Obstime: obstime, + Data: &original, + } + + data = append(data, lardObs.ToRow()) + } + + // TODO: Original text obs were not flagged, so we don't return a flags? + // Or should we return default values? + return data, nil } diff --git a/migrations/stinfosys/elem_map.go b/migrations/stinfosys/elem_map.go new file mode 100644 index 00000000..0dd746fc --- /dev/null +++ b/migrations/stinfosys/elem_map.go @@ -0,0 +1,73 @@ +package stinfosys + +import ( + "context" + "log/slog" + "os" + "time" + + "github.com/jackc/pgx/v5" +) + +// Map of metadata used to query timeseries ID in LARD +type ElemMap = map[Key]Param + +// Key is used for lookup of parameter offsets and metadata from Stinfosys +type Key struct { + ElemCode string + TableName string +} + +// Subset of elem_map_cfnames_param query with only param info +type Param struct { + TypeID int32 + ParamID int32 + Hlevel *int32 + Sensor int32 + Fromtime time.Time + IsScalar bool +} + +// Save metadata for later use by quering Stinfosys +func CacheElemMap(conn *pgx.Conn) ElemMap { + cache := make(ElemMap) + + rows, err := conn.Query( + context.TODO(), + `SELECT elem_code, table_name, typeid, paramid, hlevel, sensor, fromtime, scalar + FROM elem_map_cfnames_param + JOIN param USING(paramid)`, + ) + if err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + for rows.Next() { + var key Key + var param Param + err := rows.Scan( + &key.ElemCode, + &key.TableName, + ¶m.TypeID, + ¶m.ParamID, + ¶m.Hlevel, + ¶m.Sensor, + ¶m.Fromtime, + ¶m.IsScalar, + ) + if err != nil { + slog.Error(err.Error()) + os.Exit(1) + } + + cache[key] = param + } + + if rows.Err() != nil { + slog.Error(rows.Err().Error()) + os.Exit(1) + } + + return cache +} diff --git a/migrations/stinfosys/main.go b/migrations/stinfosys/main.go new file mode 100644 index 00000000..7cf002a7 --- /dev/null +++ b/migrations/stinfosys/main.go @@ -0,0 +1,23 @@ +package stinfosys + +import ( + "context" + "log" + "os" + "time" + + "github.com/jackc/pgx/v5" +) + +const STINFOSYS_ENV_VAR string = "STINFO_CONN_STRING" + +func Connect() (*pgx.Conn, context.Context) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + conn, err := pgx.Connect(ctx, os.Getenv(STINFOSYS_ENV_VAR)) + if err != nil { + log.Fatal("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) + } + return conn, ctx +} diff --git a/migrations/stinfosys/non_scalars.go b/migrations/stinfosys/non_scalars.go new file mode 100644 index 00000000..def5e59a --- /dev/null +++ b/migrations/stinfosys/non_scalars.go @@ -0,0 +1,44 @@ +package stinfosys + +import ( + "context" + "log" + + "github.com/jackc/pgx/v5" +) + +func getNonScalars(conn *pgx.Conn) []int32 { + rows, err := conn.Query(context.TODO(), "SELECT paramid FROM param WHERE scalar = false ORDER BY paramid") + if err != nil { + log.Fatal(err) + } + nonscalars, err := pgx.CollectRows(rows, pgx.RowTo[int32]) + if err != nil { + log.Fatal(err) + } + return nonscalars +} + +// Tells if a paramid is scalar or not +type ScalarMap = map[int32]bool + +func GetParamScalarMap(conn *pgx.Conn) ScalarMap { + cache := make(ScalarMap) + + rows, err := conn.Query(context.TODO(), "SELECT paramid, scalar FROM param") + if err != nil { + log.Fatal(err) + } + + for rows.Next() { + var paramid int32 + var isScalar bool + if err := rows.Scan(¶mid, &isScalar); err != nil { + log.Fatal(err) + } + cache[paramid] = isScalar + } + + return cache + +} diff --git a/migrations/lard/permissions.go b/migrations/stinfosys/permissions.go similarity index 77% rename from migrations/lard/permissions.go rename to migrations/stinfosys/permissions.go index b0b7df5e..ad2ed874 100644 --- a/migrations/lard/permissions.go +++ b/migrations/stinfosys/permissions.go @@ -1,10 +1,9 @@ -package lard +package stinfosys import ( "context" "log/slog" "os" - "time" "github.com/jackc/pgx/v5" ) @@ -28,18 +27,7 @@ type PermitMaps struct { StationPermits StationPermitMap } -func NewPermitTables() PermitMaps { - slog.Info("Connecting to Stinfosys to cache permits") - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - conn, err := pgx.Connect(ctx, os.Getenv(STINFO_ENV_VAR)) - if err != nil { - slog.Error("Could not connect to Stinfosys. Make sure to be connected to the VPN. 
" + err.Error()) - os.Exit(1) - } - defer conn.Close(ctx) - +func NewPermitTables(conn *pgx.Conn) PermitMaps { return PermitMaps{ ParamPermits: cacheParamPermits(conn), StationPermits: cacheStationPermits(conn), @@ -110,9 +98,9 @@ func cacheStationPermits(conn *pgx.Conn) StationPermitMap { return cache } -func (c *PermitMaps) TimeseriesIsOpen(stnr, typeid, paramid int32) bool { +func (permits *PermitMaps) TimeseriesIsOpen(stnr, typeid, paramid int32) bool { // First check param permit table - if permits, ok := c.ParamPermits[stnr]; ok { + if permits, ok := permits.ParamPermits[stnr]; ok { for _, permit := range permits { if (permit.TypeId == 0 || permit.TypeId == typeid) && (permit.ParamdId == 0 || permit.ParamdId == paramid) { @@ -122,7 +110,7 @@ func (c *PermitMaps) TimeseriesIsOpen(stnr, typeid, paramid int32) bool { } // Otherwise check station permit table - if permit, ok := c.StationPermits[stnr]; ok { + if permit, ok := permits.StationPermits[stnr]; ok { return permit == 1 } diff --git a/migrations/stinfosys/timeseries.go b/migrations/stinfosys/timeseries.go new file mode 100644 index 00000000..9de4936b --- /dev/null +++ b/migrations/stinfosys/timeseries.go @@ -0,0 +1,45 @@ +package stinfosys + +import ( + "context" + "log" + kvalobs "migrate/kvalobs/db" + "migrate/utils" + + "github.com/jackc/pgx/v5" +) + +type TimespanMap = map[kvalobs.Label]utils.TimeSpan + +func getTimeseries(conn *pgx.Conn) TimespanMap { + cache := make(TimespanMap) + + rows, err := conn.Query(context.TODO(), + `SELECT stationid, message_formatid, paramid, sensor, level, fromtime, totime + FROM time_series`) + if err != nil { + log.Fatal(err) + } + + for rows.Next() { + var label kvalobs.Label + var timespan utils.TimeSpan + + err := rows.Scan( + &label.StationID, + &label.TypeID, + &label.ParamID, + &label.Sensor, + &label.Level, + ×pan.From, + ×pan.To, + ) + if err != nil { + log.Fatal(err) + } + + cache[label] = timespan + } + + return cache +} diff --git a/migrations/tests/kdvh_test.go b/migrations/tests/kdvh_test.go index 25716586..6e1e183f 100644 --- a/migrations/tests/kdvh_test.go +++ b/migrations/tests/kdvh_test.go @@ -12,7 +12,7 @@ import ( "migrate/kdvh/db" port "migrate/kdvh/import" "migrate/kdvh/import/cache" - "migrate/lard" + "migrate/stinfosys" ) type KdvhTestCase struct { @@ -33,14 +33,14 @@ func (t *KdvhTestCase) mockConfig() (*port.Config, *cache.Cache) { Sep: ";", }, &cache.Cache{ - Stinfo: cache.StinfoMap{ + Elements: stinfosys.ElemMap{ {ElemCode: t.elem, TableName: t.table}: { Fromtime: time.Date(2001, 7, 1, 9, 0, 0, 0, time.UTC), IsScalar: true, }, }, - Permits: lard.PermitMaps{ - StationPermits: lard.StationPermitMap{ + Permits: stinfosys.PermitMaps{ + StationPermits: stinfosys.StationPermitMap{ t.station: t.permit, }, }, diff --git a/migrations/tests/kvalobs_test.go b/migrations/tests/kvalobs_test.go index e1b7d8de..ae923502 100644 --- a/migrations/tests/kvalobs_test.go +++ b/migrations/tests/kvalobs_test.go @@ -9,10 +9,10 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "migrate/kvalobs/db" + kvalobs "migrate/kvalobs/db" port "migrate/kvalobs/import" "migrate/kvalobs/import/cache" - "migrate/lard" + "migrate/stinfosys" "migrate/utils" ) @@ -20,7 +20,8 @@ const LARD_STRING string = "host=localhost user=postgres dbname=postgres passwor const DUMPS_PATH string = "./files" type KvalobsTestCase struct { - db db.DB + db kvalobs.DB + table kvalobs.Table station int32 paramid int32 typeid int32 @@ -33,7 +34,7 @@ type KvalobsTestCase struct { func (t *KvalobsTestCase) mockConfig() (*port.Config, 
*cache.Cache) { fromtime, _ := time.Parse(time.DateOnly, "1900-01-01") return &port.Config{ - BaseConfig: db.BaseConfig{ + BaseConfig: kvalobs.BaseConfig{ Stations: []int32{t.station}, }, }, @@ -41,24 +42,14 @@ func (t *KvalobsTestCase) mockConfig() (*port.Config, *cache.Cache) { Meta: map[cache.MetaKey]utils.TimeSpan{ {Stationid: t.station}: {From: &fromtime}, }, - Permits: lard.PermitMaps{ - StationPermits: lard.StationPermitMap{ + Permits: stinfosys.PermitMaps{ + StationPermits: stinfosys.StationPermitMap{ t.station: t.permit, }, }, } } -type KvalobsDataCase struct { - KvalobsTestCase - table db.DataTable -} - -func DataCase(ktc KvalobsTestCase) KvalobsDataCase { - path := filepath.Join(DUMPS_PATH, ktc.db.Name) - return KvalobsDataCase{ktc, port.DataTable(path)} -} - func TestImportDataKvalobs(t *testing.T) { log.SetFlags(log.LstdFlags | log.Lshortfile) @@ -68,48 +59,26 @@ func TestImportDataKvalobs(t *testing.T) { } defer pool.Close() - _, histkvalobs := db.InitDBs() - - cases := []KvalobsDataCase{ - DataCase(KvalobsTestCase{db: histkvalobs, station: 18700, paramid: 313, permit: 1, expectedRows: 39}), - } - - for _, c := range cases { - config, cache := c.mockConfig() - insertedRows, err := port.ImportTable(c.table, cache, pool, config) - - switch { - case err != nil: - t.Fatal(err) - case insertedRows != c.expectedRows: - t.Fail() - } - } -} - -type KvalobsTextCase struct { - KvalobsTestCase - table db.TextTable -} - -func TextCase(ktc KvalobsTestCase) KvalobsTextCase { - path := filepath.Join(DUMPS_PATH, ktc.db.Name) - return KvalobsTextCase{ktc, port.TextTable(path)} -} - -func TestImportTextKvalobs(t *testing.T) { - log.SetFlags(log.LstdFlags | log.Lshortfile) - - pool, err := pgxpool.New(context.TODO(), LARD_STRING) - if err != nil { - t.Log("Could not connect to Lard:", err) - } - defer pool.Close() - - kvalobs, _ := db.InitDBs() - - cases := []KvalobsTextCase{ - TextCase(KvalobsTestCase{db: kvalobs, station: 18700, permit: 1, expectedRows: 182}), + prod, hist := kvalobs.InitDBs() + prod.Path = filepath.Join(DUMPS_PATH, prod.Name) + hist.Path = filepath.Join(DUMPS_PATH, hist.Name) + + cases := []KvalobsTestCase{ + { + db: hist, + table: port.DataTable(hist.Path), + station: 18700, + paramid: 313, + permit: 1, + expectedRows: 39, + }, + { + db: prod, + table: port.TextTable(prod.Path), + station: 18700, + permit: 1, + expectedRows: 182, + }, } for _, c := range cases { From 47302deb1fddaf9347caab129746ef1d4a5753e6 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 5 Dec 2024 09:18:32 +0100 Subject: [PATCH 55/67] Run cleanup even when tests fail --- justfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/justfile b/justfile index 9e111a00..7b969f0a 100644 --- a/justfile +++ b/justfile @@ -6,20 +6,21 @@ test_all: setup && clean cargo test --workspace --no-fail-fast -- --nocapture --test-threads=1 test_end_to_end: setup && clean - cargo test --test end_to_end --no-fail-fast -- --nocapture --test-threads=1 + -cargo test --test end_to_end --no-fail-fast -- --nocapture --test-threads=1 test_migrations: debug_migrations && clean # Debug commands don't perfom the clean up action after running. # This allows to manually check the state of the database. 
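 # Note: the leading "-" added in front of the test commands makes `just`
 # ignore a non-zero exit status, so recipes that list `clean` after "&&"
 # still run the cleanup step even when the tests themselves fail.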
+ debug_kafka: setup - cargo test --test end_to_end test_kafka --features debug --no-fail-fast -- --nocapture --test-threads=1 + -cargo test --test end_to_end test_kafka --features debug --no-fail-fast -- --nocapture --test-threads=1 debug_test TEST: setup - cargo test {{TEST}} --features debug --no-fail-fast -- --nocapture --test-threads=1 + -cargo test {{TEST}} --features debug --no-fail-fast -- --nocapture --test-threads=1 debug_migrations: setup - @ cd migrations && go test -v ./... + -@ cd migrations && go test -v ./... setup: @ echo "Starting Postgres docker container..." From 75e445a2826482e6e784c0b647ff974f308bf76b Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 5 Dec 2024 14:11:59 +0100 Subject: [PATCH 56/67] Add more automatic way of dropping indexes --- db/drop_indices.sql | 18 +++++++++++------- db/flags.sql | 2 +- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/db/drop_indices.sql b/db/drop_indices.sql index 1be451d6..a9fe7990 100644 --- a/db/drop_indices.sql +++ b/db/drop_indices.sql @@ -1,7 +1,11 @@ --- Remove indices before bulk insertion -DROP INDEX IF EXISTS data_timestamp_index, - data_timeseries_index, - nonscalar_data_timestamp_index, - nonscalar_data_timeseries_index, - old_flags_obtime_index, - old_flags_timeseries_index; +DO $$ +DECLARE + i RECORD; +BEGIN + FOR i IN (SELECT schemaname, indexname fROM pg_indexes + WHERE schemaname IN ('public', 'flags') + AND NOT indexdef LIKE '%UNIQUE%') + LOOP + EXECUTE format('DROP INDEX IF EXISTS %s.%s', i.schemaname, i.indexname); + END LOOP; +END $$; diff --git a/db/flags.sql b/db/flags.sql index 0bc9348a..b54bc595 100644 --- a/db/flags.sql +++ b/db/flags.sql @@ -10,5 +10,5 @@ CREATE TABLE IF NOT EXISTS flags.kvdata ( cfailed TEXT NULL, CONSTRAINT unique_kvdata_timeseries_obstime UNIQUE (timeseries, obstime) ); -CREATE INDEX IF NOT EXISTS kvdata_obtime_index ON flags.kvdata (obstime); +CREATE INDEX IF NOT EXISTS kvdata_obstime_index ON flags.kvdata (obstime); CREATE INDEX IF NOT EXISTS kvdata_timeseries_index ON flags.kvdata USING HASH (timeseries); From bb35e86efe3042a1c4c3ee7de3375942bf322f55 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 6 Dec 2024 13:49:24 +0100 Subject: [PATCH 57/67] Remove directory that should not be there --- .../kdvh/tests/T_MDATA_combined/12345/TA.csv | 2645 ----------------- 1 file changed, 2645 deletions(-) delete mode 100644 migrations/kdvh/tests/T_MDATA_combined/12345/TA.csv diff --git a/migrations/kdvh/tests/T_MDATA_combined/12345/TA.csv b/migrations/kdvh/tests/T_MDATA_combined/12345/TA.csv deleted file mode 100644 index dd6cb263..00000000 --- a/migrations/kdvh/tests/T_MDATA_combined/12345/TA.csv +++ /dev/null @@ -1,2645 +0,0 @@ -2644 -2001-07-01_09:00:00;12.9;70000 -2001-07-01_10:00:00;13;70000 -2001-07-01_11:00:00;13;70000 -2001-07-01_12:00:00;13.1;70000 -2001-07-01_13:00:00;13.1;70000 -2001-07-01_14:00:00;13;70000 -2001-07-01_15:00:00;12.9;70000 -2001-07-01_16:00:00;12.8;70000 -2001-07-01_17:00:00;12.8;70000 -2001-07-01_18:00:00;12.7;70000 -2001-07-01_19:00:00;12.8;70000 -2001-07-01_20:00:00;12.6;70000 -2001-07-01_21:00:00;12.6;70000 -2001-07-01_22:00:00;12.6;70000 -2001-07-01_23:00:00;12.6;70000 -2001-07-02_00:00:00;12.5;70000 -2001-07-02_01:00:00;12.4;70000 -2001-07-02_02:00:00;12.4;70000 -2001-07-02_03:00:00;12.3;70000 -2001-07-02_04:00:00;12.3;70000 -2001-07-02_05:00:00;12.3;70000 -2001-07-02_06:00:00;12.4;70000 -2001-07-02_07:00:00;12.5;70000 -2001-07-02_08:00:00;12.6;70000 -2001-07-02_09:00:00;12.7;70000 -2001-07-02_10:00:00;12.9;70000 
-2001-07-02_11:00:00;13;70000 -2001-07-02_12:00:00;13.2;70000 -2001-07-02_13:00:00;13.3;70000 -2001-07-02_14:00:00;13.3;70000 -2001-07-02_15:00:00;13.4;70000 -2001-07-02_16:00:00;13.3;70000 -2001-07-02_17:00:00;13.3;70000 -2001-07-02_18:00:00;13.2;70000 -2001-07-02_19:00:00;13.2;70000 -2001-07-02_20:00:00;13.1;70000 -2001-07-02_21:00:00;12.9;70000 -2001-07-02_22:00:00;12.9;70000 -2001-07-02_23:00:00;12.9;70000 -2001-07-03_00:00:00;12.8;58927 -2001-07-03_01:00:00;12.7;70000 -2001-07-03_02:00:00;12.6;70000 -2001-07-03_03:00:00;12.7;70000 -2001-07-03_04:00:00;12.5;70000 -2001-07-03_05:00:00;12.2;70000 -2001-07-03_06:00:00;12.3;70000 -2001-07-03_07:00:00;12.4;70000 -2001-07-03_08:00:00;12.5;70000 -2001-07-03_09:00:00;12.6;70000 -2001-07-03_10:00:00;12.6;70000 -2001-07-03_11:00:00;12.8;70000 -2001-07-03_12:00:00;12.8;70000 -2001-07-03_13:00:00;13;70000 -2001-07-03_14:00:00;13.1;70000 -2001-07-03_15:00:00;13.2;70000 -2001-07-03_16:00:00;13.2;70000 -2001-07-03_17:00:00;13.1;70000 -2001-07-03_18:00:00;13.1;70000 -2001-07-03_19:00:00;13.1;70000 -2001-07-03_20:00:00;12.8;70000 -2001-07-03_21:00:00;12.8;70000 -2001-07-03_22:00:00;12.8;70000 -2001-07-03_23:00:00;12.8;70000 -2001-07-04_00:00:00;12.6;70000 -2001-07-04_01:00:00;12.8;70000 -2001-07-04_02:00:00;12.3;70000 -2001-07-04_03:00:00;12.6;70000 -2001-07-04_04:00:00;12.5;70000 -2001-07-04_05:00:00;12.5;70000 -2001-07-04_06:00:00;12.5;70000 -2001-07-04_07:00:00;12.5;70000 -2001-07-04_08:00:00;12.4;70000 -2001-07-04_09:00:00;12.5;70000 -2001-07-04_10:00:00;12.6;70000 -2001-07-04_11:00:00;12.6;70000 -2001-07-04_12:00:00;12.6;58927 -2001-07-04_13:00:00;12.5;70000 -2001-07-04_14:00:00;12.6;70000 -2001-07-04_15:00:00;12.5;70000 -2001-07-04_16:00:00;12.6;70000 -2001-07-04_17:00:00;12.6;70000 -2001-07-04_18:00:00;12.6;70000 -2001-07-04_19:00:00;12.5;70000 -2001-07-04_20:00:00;12.5;70000 -2001-07-04_21:00:00;12.5;70000 -2001-07-04_22:00:00;12.4;70000 -2001-07-04_23:00:00;12.4;70000 -2001-07-05_00:00:00;12.5;70000 -2001-07-05_01:00:00;12.4;70000 -2001-07-05_02:00:00;12.1;70000 -2001-07-05_03:00:00;11.9;70000 -2001-07-05_04:00:00;12;70000 -2001-07-05_05:00:00;12;70000 -2001-07-05_06:00:00;12.1;70000 -2001-07-05_07:00:00;12.3;70000 -2001-07-05_08:00:00;12.6;70000 -2001-07-05_09:00:00;12.9;70000 -2001-07-05_10:00:00;13;70000 -2001-07-05_11:00:00;13.2;70000 -2001-07-05_12:00:00;13.5;70000 -2001-07-05_13:00:00;13.8;70000 -2001-07-05_14:00:00;13.9;70000 -2001-07-05_15:00:00;13.4;70000 -2001-07-05_16:00:00;13.9;70000 -2001-07-05_17:00:00;13.8;70000 -2001-07-05_18:00:00;13.7;70000 -2001-07-05_19:00:00;13.6;70000 -2001-07-05_20:00:00;13.5;70000 -2001-07-05_21:00:00;13.3;70000 -2001-07-05_22:00:00;13.2;70000 -2001-07-05_23:00:00;13.1;70000 -2001-07-06_00:00:00;13.1;70000 -2001-07-06_01:00:00;13;70000 -2001-07-06_02:00:00;12.9;70000 -2001-07-06_03:00:00;12.8;70000 -2001-07-06_04:00:00;12.9;58927 -2001-07-06_05:00:00;12.9;70000 -2001-07-06_06:00:00;13.2;70000 -2001-07-06_07:00:00;13.2;70000 -2001-07-06_08:00:00;13.3;70000 -2001-07-06_09:00:00;13.8;70000 -2001-07-06_10:00:00;14.3;70000 -2001-07-06_11:00:00;14.7;70000 -2001-07-06_12:00:00;15.8;70000 -2001-07-06_13:00:00;14.9;70000 -2001-07-06_14:00:00;14.6;70000 -2001-07-06_15:00:00;14.7;70000 -2001-07-06_16:00:00;14.6;70000 -2001-07-06_17:00:00;15.5;70000 -2001-07-06_18:00:00;16.6;70000 -2001-07-06_19:00:00;15.5;70000 -2001-07-06_20:00:00;14.8;70000 -2001-07-06_21:00:00;14.7;70000 -2001-07-06_22:00:00;16.2;70000 -2001-07-06_23:00:00;15.6;70000 -2001-07-07_00:00:00;15.1;70000 -2001-07-07_01:00:00;14.4;70000 
-2001-07-07_02:00:00;13.8;70000 -2001-07-07_03:00:00;13.2;70000 -2001-07-07_04:00:00;13.3;70000 -2001-07-07_05:00:00;13.6;70000 -2001-07-07_06:00:00;14;70000 -2001-07-07_07:00:00;14.1;70000 -2001-07-07_08:00:00;14.1;70000 -2001-07-07_09:00:00;14.3;70000 -2001-07-07_10:00:00;14.4;70000 -2001-07-07_11:00:00;14.5;70000 -2001-07-07_12:00:00;14.6;70000 -2001-07-07_13:00:00;14.9;70000 -2001-07-07_14:00:00;15;70000 -2001-07-07_15:00:00;14.9;70000 -2001-07-07_16:00:00;15;70000 -2001-07-07_17:00:00;14.9;70000 -2001-07-07_18:00:00;14.9;70000 -2001-07-07_19:00:00;14.8;70000 -2001-07-07_20:00:00;14.8;70000 -2001-07-07_21:00:00;15;70000 -2001-07-07_22:00:00;15;70000 -2001-07-07_23:00:00;15.3;70000 -2001-07-08_00:00:00;14.9;70000 -2001-07-08_01:00:00;14.6;70000 -2001-07-08_02:00:00;14.5;70000 -2001-07-08_03:00:00;14.4;70000 -2001-07-08_04:00:00;14.4;70000 -2001-07-08_05:00:00;14.7;70000 -2001-07-08_06:00:00;14.6;70000 -2001-07-08_07:00:00;14.3;70000 -2001-07-08_08:00:00;14.5;70000 -2001-07-08_09:00:00;14.5;70000 -2001-07-08_10:00:00;14.5;70000 -2001-07-08_11:00:00;15.1;70000 -2001-07-08_12:00:00;15.2;70000 -2001-07-08_13:00:00;15.5;70000 -2001-07-08_14:00:00;14.6;70000 -2001-07-08_15:00:00;16.9;78947 -2001-07-08_16:00:00;17.1;78947 -2001-07-08_17:00:00;16.9;78947 -2001-07-08_18:00:00;16;78947 -2001-07-08_19:00:00;15.5;78947 -2001-07-08_20:00:00;15.1;78947 -2001-07-08_21:00:00;14.9;78947 -2001-07-08_22:00:00;14.6;78947 -2001-07-08_23:00:00;14.3;78947 -2001-07-09_00:00:00;14.1;78947 -2001-07-09_01:00:00;14.2;78947 -2001-07-09_02:00:00;14.3;78947 -2001-07-09_03:00:00;14;78947 -2001-07-09_04:00:00;14;78947 -2001-07-09_05:00:00;14.2;78947 -2001-07-09_06:00:00;14;78947 -2001-07-09_07:00:00;14.6;78947 -2001-07-09_08:00:00;14.5;78947 -2001-07-09_09:00:00;15.3;78947 -2001-07-09_10:00:00;16.3;78947 -2001-07-09_11:00:00;15.1;78947 -2001-07-09_12:00:00;16.2;78947 -2001-07-09_13:00:00;15.2;78947 -2001-07-09_14:00:00;15.6;78947 -2001-07-09_15:00:00;15.4;78947 -2001-07-09_16:00:00;15.6;78947 -2001-07-09_17:00:00;15;78947 -2001-07-09_18:00:00;14.2;78947 -2001-07-09_19:00:00;13.7;78947 -2001-07-09_20:00:00;13.5;78947 -2001-07-09_21:00:00;13.2;78947 -2001-07-09_22:00:00;13.4;78947 -2001-07-09_23:00:00;13.5;78947 -2001-07-10_00:00:00;12.8;78947 -2001-07-10_01:00:00;12.9;78947 -2001-07-10_02:00:00;12.9;78947 -2001-07-10_03:00:00;13.2;78947 -2001-07-10_04:00:00;13.1;78947 -2001-07-10_05:00:00;13.3;78947 -2001-07-10_06:00:00;13.8;78947 -2001-07-10_07:00:00;13.9;78947 -2001-07-10_08:00:00;14.3;78947 -2001-07-10_09:00:00;14.7;78947 -2001-07-10_10:00:00;15.1;78947 -2001-07-10_11:00:00;15.3;78947 -2001-07-10_12:00:00;15.3;78947 -2001-07-10_13:00:00;16;78947 -2001-07-10_14:00:00;16.1;78947 -2001-07-10_15:00:00;15.6;78947 -2001-07-10_16:00:00;15;78947 -2001-07-10_17:00:00;14.5;78947 -2001-07-10_18:00:00;14.3;78947 -2001-07-10_19:00:00;13.5;78947 -2001-07-10_20:00:00;13.3;78947 -2001-07-10_21:00:00;12.9;78947 -2001-07-10_22:00:00;12.2;78947 -2001-07-10_23:00:00;11.9;78947 -2001-07-11_00:00:00;13;78947 -2001-07-11_01:00:00;12.7;78947 -2001-07-11_02:00:00;12.7;78947 -2001-07-11_03:00:00;12.6;78947 -2001-07-11_04:00:00;12.7;78947 -2001-07-11_05:00:00;12.8;78947 -2001-07-11_06:00:00;13.7;78947 -2001-07-11_07:00:00;13.7;78947 -2001-07-11_08:00:00;13.7;78947 -2001-07-11_09:00:00;14.4;78947 -2001-07-11_10:00:00;14.7;78947 -2001-07-11_11:00:00;15.2;78947 -2001-07-11_12:00:00;15.3;78947 -2001-07-11_13:00:00;13.7;78947 -2001-07-11_14:00:00;14.5;78947 -2001-07-11_15:00:00;15;78947 -2001-07-11_16:00:00;13.2;78947 
-2001-07-11_17:00:00;12.9;78947 -2001-07-11_18:00:00;12.5;78947 -2001-07-11_19:00:00;12.3;78947 -2001-07-11_20:00:00;12.4;78947 -2001-07-11_21:00:00;12.4;78947 -2001-07-11_22:00:00;12.4;78947 -2001-07-11_23:00:00;12.5;78947 -2001-07-12_00:00:00;12;78947 -2001-07-12_01:00:00;12.1;78947 -2001-07-12_02:00:00;12.2;78947 -2001-07-12_03:00:00;12.2;78947 -2001-07-12_04:00:00;12.3;78947 -2001-07-12_05:00:00;12.3;78947 -2001-07-12_06:00:00;12.1;78947 -2001-07-12_07:00:00;12.4;78947 -2001-07-12_08:00:00;13.5;78947 -2001-07-12_09:00:00;13.1;78947 -2001-07-12_10:00:00;14;78947 -2001-07-12_11:00:00;15.2;78947 -2001-07-12_12:00:00;14.3;78947 -2001-07-12_13:00:00;13.9;78947 -2001-07-12_14:00:00;14.3;78947 -2001-07-12_15:00:00;14;78947 -2001-07-12_16:00:00;13.9;78947 -2001-07-12_17:00:00;13.7;78947 -2001-07-12_18:00:00;13.5;78947 -2001-07-12_19:00:00;13.1;78947 -2001-07-12_20:00:00;12.6;78947 -2001-07-12_21:00:00;12.2;78947 -2001-07-12_22:00:00;11.9;78947 -2001-07-12_23:00:00;11.9;78947 -2001-07-13_00:00:00;11.7;78947 -2001-07-13_01:00:00;11.5;78947 -2001-07-13_02:00:00;11.3;78947 -2001-07-13_03:00:00;11.1;78947 -2001-07-13_04:00:00;11.3;78947 -2001-07-13_05:00:00;12;78947 -2001-07-13_06:00:00;13.4;78947 -2001-07-13_08:00:00;15.5;78947 -2001-07-13_09:00:00;16.5;78947 -2001-07-13_10:00:00;17.4;78947 -2001-07-13_11:00:00;17.7;78947 -2001-07-13_12:00:00;17.3;78947 -2001-07-13_13:00:00;17.3;78947 -2001-07-13_14:00:00;17.3;78947 -2001-07-13_15:00:00;17;78947 -2001-07-13_16:00:00;16.4;78947 -2001-07-13_17:00:00;15.5;78947 -2001-07-13_18:00:00;14.9;78947 -2001-07-13_19:00:00;14.1;78947 -2001-07-13_20:00:00;13.2;78947 -2001-07-13_21:00:00;12.3;78947 -2001-07-13_23:00:00;11.5;78947 -2001-07-14_00:00:00;11.2;78947 -2001-07-14_01:00:00;10.9;78947 -2001-07-14_02:00:00;10.7;78947 -2001-07-14_03:00:00;10.6;78947 -2001-07-14_04:00:00;10.7;78947 -2001-07-14_05:00:00;11.6;78947 -2001-07-14_06:00:00;13.1;78947 -2001-07-14_07:00:00;14.5;78947 -2001-07-14_08:00:00;15.9;78947 -2001-07-14_09:00:00;17.2;78947 -2001-07-14_10:00:00;18.3;78947 -2001-07-14_11:00:00;18.8;78947 -2001-07-14_12:00:00;18.5;78947 -2001-07-14_13:00:00;17.9;78947 -2001-07-14_14:00:00;17.4;78947 -2001-07-14_15:00:00;17.1;78947 -2001-07-14_16:00:00;17;78947 -2001-07-14_17:00:00;16.6;78947 -2001-07-14_18:00:00;16.4;78947 -2001-07-14_19:00:00;15.4;78947 -2001-07-14_20:00:00;14.6;78947 -2001-07-14_21:00:00;13.7;78947 -2001-07-14_23:00:00;12.9;78947 -2001-07-15_00:00:00;12.7;78947 -2001-07-15_01:00:00;12.5;78947 -2001-07-15_02:00:00;12.5;78947 -2001-07-15_03:00:00;12.4;78947 -2001-07-15_05:00:00;12.8;78947 -2001-07-15_06:00:00;13.6;78947 -2001-07-15_10:00:00;16.7;78947 -2001-07-15_11:00:00;16.6;78947 -2001-07-15_13:00:00;16.1;78947 -2001-07-15_15:00:00;15.9;78947 -2001-07-15_16:00:00;15.5;78947 -2001-07-15_17:00:00;15.1;78947 -2001-07-15_18:00:00;14.7;78947 -2001-07-15_19:00:00;14.2;78947 -2001-07-15_20:00:00;13.5;78947 -2001-07-15_21:00:00;12.4;78947 -2001-07-15_22:00:00;11.5;78947 -2001-07-15_23:00:00;10.9;78947 -2001-07-16_00:00:00;10.1;78947 -2001-07-16_01:00:00;10.6;78947 -2001-07-16_02:00:00;11.8;78947 -2001-07-16_03:00:00;12.7;78947 -2001-07-16_04:00:00;13.2;78947 -2001-07-16_05:00:00;13.6;78947 -2001-07-16_06:00:00;14;78947 -2001-07-16_07:00:00;15.2;78947 -2001-07-16_08:00:00;16.6;78947 -2001-07-16_09:00:00;17.7;78947 -2001-07-16_10:00:00;18.8;78947 -2001-07-16_11:00:00;19.5;78947 -2001-07-16_12:00:00;20.5;78947 -2001-07-16_13:00:00;20.6;78947 -2001-07-16_14:00:00;20.7;78947 -2001-07-16_15:00:00;19.3;78947 -2001-07-16_16:00:00;19.4;78947 
-2001-07-16_17:00:00;18.5;78947 -2001-07-16_18:00:00;17.1;78947 -2001-07-16_19:00:00;15.8;78947 -2001-07-16_20:00:00;15.1;78947 -2001-07-16_21:00:00;15.1;78947 -2001-07-16_22:00:00;15.5;78947 -2001-07-16_23:00:00;15.1;78947 -2001-07-17_00:00:00;15.3;78947 -2001-07-17_01:00:00;15.1;78947 -2001-07-17_02:00:00;15.3;78947 -2001-07-17_03:00:00;15.3;78947 -2001-07-17_04:00:00;15.2;78947 -2001-07-17_05:00:00;15.1;78947 -2001-07-17_06:00:00;14.8;78947 -2001-07-17_07:00:00;14.8;78947 -2001-07-17_08:00:00;14.8;78947 -2001-07-17_09:00:00;15;78947 -2001-07-17_10:00:00;15.2;78947 -2001-07-17_11:00:00;15.2;78947 -2001-07-17_12:00:00;15.3;78947 -2001-07-17_13:00:00;15.4;78947 -2001-07-17_14:00:00;15.5;78947 -2001-07-17_15:00:00;15.5;78947 -2001-07-17_16:00:00;16.4;78947 -2001-07-17_17:00:00;16.2;78947 -2001-07-17_18:00:00;14.6;78947 -2001-07-17_19:00:00;14.7;78947 -2001-07-17_20:00:00;14.8;78947 -2001-07-17_21:00:00;14.1;78947 -2001-07-17_22:00:00;13.7;78947 -2001-07-17_23:00:00;13.7;78947 -2001-07-18_00:00:00;13.4;78947 -2001-07-18_01:00:00;14;78947 -2001-07-18_02:00:00;14.3;78947 -2001-07-18_03:00:00;13.9;78947 -2001-07-18_04:00:00;13.4;78947 -2001-07-18_05:00:00;13.7;78947 -2001-07-18_06:00:00;16.2;78947 -2001-07-18_07:00:00;17.4;78947 -2001-07-18_08:00:00;18.3;78947 -2001-07-18_11:00:00;20.1;78947 -2001-07-18_12:00:00;19.7;78947 -2001-07-18_13:00:00;18.6;78947 -2001-07-18_14:00:00;19.3;78947 -2001-07-18_15:00:00;18.3;78947 -2001-07-18_16:00:00;16.6;78947 -2001-07-18_17:00:00;17;78947 -2001-07-18_18:00:00;16.9;78947 -2001-07-18_19:00:00;16.5;78947 -2001-07-18_20:00:00;15.1;78947 -2001-07-18_21:00:00;14.7;78947 -2001-07-18_22:00:00;14.3;78947 -2001-07-18_23:00:00;14;78947 -2001-07-19_00:00:00;14.1;78947 -2001-07-19_01:00:00;14;78947 -2001-07-19_02:00:00;14.1;78947 -2001-07-19_03:00:00;14.2;78947 -2001-07-19_04:00:00;13.9;78947 -2001-07-19_05:00:00;13.8;78947 -2001-07-19_06:00:00;14.7;78947 -2001-07-19_07:00:00;15.7;78947 -2001-07-19_08:00:00;15.7;78947 -2001-07-19_09:00:00;17.2;78947 -2001-07-19_10:00:00;18.4;78947 -2001-07-19_11:00:00;18.3;78947 -2001-07-19_12:00:00;16.1;78947 -2001-07-19_13:00:00;15.5;78947 -2001-07-19_14:00:00;16;78947 -2001-07-19_15:00:00;16.5;78947 -2001-07-19_16:00:00;15.3;78947 -2001-07-19_17:00:00;15.4;78947 -2001-07-19_18:00:00;15;78947 -2001-07-19_19:00:00;14.4;78947 -2001-07-19_20:00:00;14.2;78947 -2001-07-19_21:00:00;14.1;78947 -2001-07-19_22:00:00;14;78947 -2001-07-19_23:00:00;13.6;78947 -2001-07-20_00:00:00;13.8;78947 -2001-07-20_01:00:00;13.8;78947 -2001-07-20_02:00:00;13.6;78947 -2001-07-20_03:00:00;13.7;78947 -2001-07-20_04:00:00;13.6;78947 -2001-07-20_05:00:00;14;78947 -2001-07-20_06:00:00;15.1;78947 -2001-07-20_07:00:00;15.6;78947 -2001-07-20_08:00:00;15.4;78947 -2001-07-20_09:00:00;16;78947 -2001-07-20_10:00:00;16.6;78947 -2001-07-20_11:00:00;17.1;78947 -2001-07-20_12:00:00;17.3;78947 -2001-07-20_13:00:00;17;78947 -2001-07-20_14:00:00;16.5;78947 -2001-07-20_15:00:00;16.4;78947 -2001-07-20_16:00:00;15.7;78947 -2001-07-20_17:00:00;14.9;78947 -2001-07-20_18:00:00;14.4;78947 -2001-07-20_19:00:00;14.1;78947 -2001-07-20_20:00:00;13.8;78947 -2001-07-20_21:00:00;13.7;78947 -2001-07-20_22:00:00;13.5;78947 -2001-07-20_23:00:00;13.4;78947 -2001-07-21_00:00:00;13.4;78947 -2001-07-21_01:00:00;13.4;78947 -2001-07-21_02:00:00;13.4;78947 -2001-07-21_03:00:00;13.3;78947 -2001-07-21_04:00:00;13.2;78947 -2001-07-21_05:00:00;13.2;78947 -2001-07-21_06:00:00;13.2;78947 -2001-07-21_07:00:00;13.4;78947 -2001-07-21_08:00:00;14;78947 -2001-07-21_09:00:00;14.6;78947 
-2001-07-21_10:00:00;15.2;78947 -2001-07-21_11:00:00;15.4;78947 -2001-07-21_12:00:00;16.5;78947 -2001-07-21_13:00:00;16.2;78947 -2001-07-21_14:00:00;15.8;78947 -2001-07-21_15:00:00;15.4;78947 -2001-07-21_16:00:00;15.1;78947 -2001-07-21_17:00:00;14.7;78947 -2001-07-21_18:00:00;13.9;78947 -2001-07-21_19:00:00;13.4;78947 -2001-07-21_20:00:00;13;78947 -2001-07-21_21:00:00;12.8;78947 -2001-07-21_22:00:00;12.8;78947 -2001-07-21_23:00:00;12.9;78947 -2001-07-22_00:00:00;13;78947 -2001-07-22_01:00:00;13.1;78947 -2001-07-22_02:00:00;13.2;78947 -2001-07-22_03:00:00;13.2;78947 -2001-07-22_04:00:00;13.3;78947 -2001-07-22_05:00:00;13.5;78947 -2001-07-22_06:00:00;14;78947 -2001-07-22_07:00:00;14.7;78947 -2001-07-22_08:00:00;15.5;78947 -2001-07-22_09:00:00;15.8;78947 -2001-07-22_10:00:00;16.7;78947 -2001-07-22_11:00:00;17;78947 -2001-07-22_12:00:00;16.5;78947 -2001-07-22_13:00:00;17.4;78947 -2001-07-22_14:00:00;17.3;78947 -2001-07-22_15:00:00;17.5;78947 -2001-07-22_16:00:00;17;78947 -2001-07-22_17:00:00;16.7;78947 -2001-07-22_18:00:00;15.8;78947 -2001-07-22_19:00:00;15.4;78947 -2001-07-22_20:00:00;15.5;78947 -2001-07-22_21:00:00;15.2;78947 -2001-07-22_22:00:00;15.2;78947 -2001-07-22_23:00:00;15.1;78947 -2001-07-23_00:00:00;14.9;78947 -2001-07-23_01:00:00;14.8;78947 -2001-07-23_02:00:00;14.8;78947 -2001-07-23_03:00:00;14.6;78947 -2001-07-23_04:00:00;14.5;78947 -2001-07-23_05:00:00;14.7;78947 -2001-07-23_06:00:00;15.1;78947 -2001-07-23_07:00:00;15.5;78947 -2001-07-23_08:00:00;15.7;78947 -2001-07-23_09:00:00;16.6;78947 -2001-07-23_10:00:00;18.7;78947 -2001-07-23_11:00:00;19.6;78947 -2001-07-23_12:00:00;16.8;78947 -2001-07-23_13:00:00;17.3;78947 -2001-07-23_14:00:00;17.6;78947 -2001-07-23_15:00:00;19.6;78947 -2001-07-23_16:00:00;17.4;78947 -2001-07-23_17:00:00;17.5;78947 -2001-07-23_18:00:00;16.7;78947 -2001-07-23_19:00:00;16.1;78947 -2001-07-23_20:00:00;15.4;78947 -2001-07-23_21:00:00;15.2;78947 -2001-07-23_22:00:00;14.9;78947 -2001-07-23_23:00:00;15.2;78947 -2001-07-24_00:00:00;15.4;78947 -2001-07-24_01:00:00;15;78947 -2001-07-24_02:00:00;14.1;78947 -2001-07-24_03:00:00;14.1;78947 -2001-07-24_04:00:00;14.5;78947 -2001-07-24_05:00:00;15;78947 -2001-07-24_06:00:00;15.6;78947 -2001-07-24_07:00:00;16.1;78947 -2001-07-24_08:00:00;17.4;78947 -2001-07-24_09:00:00;18.6;78947 -2001-07-24_10:00:00;19.9;78947 -2001-07-24_11:00:00;19.9;78947 -2001-07-24_12:00:00;18.2;78947 -2001-07-24_13:00:00;17.6;78947 -2001-07-24_14:00:00;17.9;78947 -2001-07-24_15:00:00;18.4;78947 -2001-07-24_16:00:00;17.9;78947 -2001-07-24_17:00:00;17.6;78947 -2001-07-24_18:00:00;17.3;78947 -2001-07-24_19:00:00;16.4;78947 -2001-07-24_20:00:00;15.4;78947 -2001-07-24_21:00:00;15.1;78947 -2001-07-24_22:00:00;15.2;78947 -2001-07-24_23:00:00;15.1;78947 -2001-07-25_00:00:00;15.1;78947 -2001-07-25_01:00:00;15;78947 -2001-07-25_02:00:00;14.9;78947 -2001-07-25_03:00:00;15;78947 -2001-07-25_04:00:00;14.9;78947 -2001-07-25_05:00:00;15.2;78947 -2001-07-25_06:00:00;15.7;78947 -2001-07-25_07:00:00;16.3;78947 -2001-07-25_09:00:00;17.7;78947 -2001-07-25_10:00:00;18.6;78947 -2001-07-25_11:00:00;19.1;78947 -2001-07-25_12:00:00;18.1;78947 -2001-07-25_13:00:00;18.7;78947 -2001-07-25_14:00:00;18.8;78947 -2001-07-25_15:00:00;18.9;78947 -2001-07-25_16:00:00;18.7;78947 -2001-07-25_17:00:00;17.8;78947 -2001-07-25_18:00:00;16.9;78947 -2001-07-25_19:00:00;16.4;78947 -2001-07-25_20:00:00;16;78947 -2001-07-25_21:00:00;15.7;78947 -2001-07-25_22:00:00;15.4;78947 -2001-07-25_23:00:00;15.1;78947 -2001-07-26_00:00:00;14.7;78947 -2001-07-26_01:00:00;14.7;78947 
-2001-07-26_02:00:00;14.6;78947 -2001-07-26_03:00:00;14.6;78947 -2001-07-26_04:00:00;14.7;78947 -2001-07-26_05:00:00;14.7;78947 -2001-07-26_06:00:00;14.6;78947 -2001-07-26_07:00:00;14.7;78947 -2001-07-26_09:00:00;15.2;78947 -2001-07-26_10:00:00;15.7;78947 -2001-07-26_11:00:00;15.8;78947 -2001-07-26_12:00:00;14.8;78947 -2001-07-26_13:00:00;14.9;78947 -2001-07-26_14:00:00;15.4;78947 -2001-07-26_15:00:00;15.7;78947 -2001-07-26_16:00:00;15.5;78947 -2001-07-26_17:00:00;15.3;78947 -2001-07-26_18:00:00;15.2;78947 -2001-07-26_19:00:00;14.6;78947 -2001-07-26_20:00:00;13.9;78947 -2001-07-26_21:00:00;13.3;78947 -2001-07-26_22:00:00;13.2;78947 -2001-07-26_23:00:00;13.2;78947 -2001-07-27_01:00:00;13.4;78947 -2001-07-27_02:00:00;13.4;78947 -2001-07-27_03:00:00;13.2;78947 -2001-07-27_04:00:00;13;78947 -2001-07-27_05:00:00;13.1;78947 -2001-07-27_06:00:00;13.8;78947 -2001-07-27_07:00:00;14.4;78947 -2001-07-27_08:00:00;15.1;78947 -2001-07-27_09:00:00;16;78947 -2001-07-27_10:00:00;16.7;78947 -2001-07-27_11:00:00;16.7;78947 -2001-07-27_12:00:00;16.8;78947 -2001-07-27_13:00:00;16.5;78947 -2001-07-27_14:00:00;16.1;78947 -2001-07-27_15:00:00;15.6;78947 -2001-07-27_16:00:00;15;78947 -2001-07-27_17:00:00;14.5;78947 -2001-07-27_18:00:00;14.1;78947 -2001-07-27_19:00:00;13.5;78947 -2001-07-27_20:00:00;12.9;78947 -2001-07-27_21:00:00;12.6;78947 -2001-07-27_22:00:00;12.5;78947 -2001-07-27_23:00:00;12.4;78947 -2001-07-28_00:00:00;12.6;78947 -2001-07-28_01:00:00;12.8;78947 -2001-07-28_02:00:00;12.9;78947 -2001-07-28_03:00:00;12.9;78947 -2001-07-28_04:00:00;13;78947 -2001-07-28_05:00:00;13.2;78947 -2001-07-28_06:00:00;13.4;78947 -2001-07-28_07:00:00;13.7;78947 -2001-07-28_09:00:00;14.1;78947 -2001-07-28_10:00:00;14.4;78947 -2001-07-28_11:00:00;14.7;78947 -2001-07-28_12:00:00;15.4;78947 -2001-07-28_13:00:00;15.3;78947 -2001-07-28_14:00:00;14.8;78947 -2001-07-28_15:00:00;14.5;78947 -2001-07-28_16:00:00;14.3;78947 -2001-07-28_17:00:00;14.1;78947 -2001-07-28_18:00:00;13.4;78947 -2001-07-28_19:00:00;12.8;78947 -2001-07-28_20:00:00;12.4;78947 -2001-07-28_21:00:00;12.3;78947 -2001-07-28_22:00:00;12.5;78947 -2001-07-28_23:00:00;12.7;78947 -2001-07-29_00:00:00;12.4;78947 -2001-07-29_01:00:00;12.3;78947 -2001-07-29_02:00:00;12.1;78947 -2001-07-29_03:00:00;12;78947 -2001-07-29_04:00:00;12.1;78947 -2001-07-29_05:00:00;12.3;78947 -2001-07-29_06:00:00;12.9;78947 -2001-07-29_07:00:00;13.5;78947 -2001-07-29_09:00:00;14.6;78947 -2001-07-29_10:00:00;15;78947 -2001-07-29_11:00:00;15.2;78947 -2001-07-29_12:00:00;15.5;78947 -2001-07-29_13:00:00;15.4;78947 -2001-07-29_14:00:00;15.2;78947 -2001-07-29_15:00:00;14.8;78947 -2001-07-29_16:00:00;14.4;78947 -2001-07-29_17:00:00;14;78947 -2001-07-29_18:00:00;13.6;78947 -2001-07-29_19:00:00;13.1;78947 -2001-07-29_20:00:00;12.7;78947 -2001-07-29_21:00:00;12.5;78947 -2001-07-29_22:00:00;12.4;78947 -2001-07-29_23:00:00;12.3;78947 -2001-07-30_00:00:00;12.3;78947 -2001-07-30_01:00:00;12.2;78947 -2001-07-30_02:00:00;12.3;78947 -2001-07-30_03:00:00;12.3;78947 -2001-07-30_05:00:00;12.6;78947 -2001-07-30_06:00:00;13.4;78947 -2001-07-30_07:00:00;14;78947 -2001-07-30_08:00:00;14.8;78947 -2001-07-30_09:00:00;15.4;78947 -2001-07-30_10:00:00;15.8;78947 -2001-07-30_11:00:00;16.1;78947 -2001-07-30_12:00:00;16.6;78947 -2001-07-30_13:00:00;16.5;78947 -2001-07-30_14:00:00;16.3;78947 -2001-07-30_15:00:00;16;78947 -2001-07-30_16:00:00;15.6;78947 -2001-07-30_17:00:00;15;78947 -2001-07-30_18:00:00;14.4;78947 -2001-07-30_19:00:00;13.9;78947 -2001-07-30_20:00:00;13.3;78947 -2001-07-30_21:00:00;12.8;78947 
-2001-07-30_22:00:00;12.5;78947 -2001-07-30_23:00:00;12.6;78947 -2001-07-31_00:00:00;13;78947 -2001-07-31_01:00:00;13.1;78947 -2001-07-31_02:00:00;13.3;78947 -2001-07-31_03:00:00;13.3;78947 -2001-07-31_04:00:00;13.2;78947 -2001-07-31_05:00:00;13.5;78947 -2001-07-31_06:00:00;14.2;78947 -2001-07-31_07:00:00;14.6;78947 -2001-07-31_08:00:00;15;78947 -2001-07-31_09:00:00;15.8;78947 -2001-07-31_10:00:00;16.4;78947 -2001-07-31_11:00:00;16.8;78947 -2001-07-31_12:00:00;17.1;78947 -2001-07-31_13:00:00;17.1;78947 -2001-07-31_14:00:00;16.6;78947 -2001-07-31_15:00:00;16.2;78947 -2001-07-31_16:00:00;15.8;78947 -2001-07-31_17:00:00;15.4;78947 -2001-07-31_18:00:00;14.8;78947 -2001-07-31_19:00:00;14;78947 -2001-07-31_20:00:00;13.2;78947 -2001-07-31_21:00:00;12.6;78947 -2001-07-31_22:00:00;12;78947 -2001-07-31_23:00:00;12.3;78947 -2001-08-01_00:00:00;13.1;78947 -2001-08-01_01:00:00;13.2;78947 -2001-08-01_02:00:00;13.3;78947 -2001-08-01_03:00:00;13.3;78947 -2001-08-01_04:00:00;13.3;78947 -2001-08-01_05:00:00;13.6;78947 -2001-08-01_06:00:00;14.4;78947 -2001-08-01_07:00:00;14.9;78947 -2001-08-01_09:00:00;16.2;78947 -2001-08-01_10:00:00;16.6;78947 -2001-08-01_11:00:00;17;78947 -2001-08-01_12:00:00;17.2;78947 -2001-08-01_13:00:00;17.4;78947 -2001-08-01_14:00:00;17.4;78947 -2001-08-01_15:00:00;17.1;78947 -2001-08-01_16:00:00;16.7;78947 -2001-08-01_17:00:00;16.2;78947 -2001-08-01_18:00:00;15.4;78947 -2001-08-01_19:00:00;14.6;78947 -2001-08-01_20:00:00;13.7;78947 -2001-08-01_21:00:00;13;78947 -2001-08-01_22:00:00;12.5;78947 -2001-08-01_23:00:00;12.3;78947 -2001-08-02_00:00:00;12.6;78947 -2001-08-02_01:00:00;12.6;78947 -2001-08-02_02:00:00;12.6;78947 -2001-08-02_03:00:00;12.8;78947 -2001-08-02_04:00:00;13.2;78947 -2001-08-02_05:00:00;14;78947 -2001-08-02_06:00:00;15.6;78947 -2001-08-02_07:00:00;16.1;78947 -2001-08-02_09:00:00;17;78947 -2001-08-02_10:00:00;17;78947 -2001-08-02_11:00:00;17.1;78947 -2001-08-02_12:00:00;17.2;78947 -2001-08-02_13:00:00;18.2;78947 -2001-08-02_14:00:00;19.2;78947 -2001-08-02_15:00:00;19.4;78947 -2001-08-02_16:00:00;19.8;78947 -2001-08-02_17:00:00;20;78947 -2001-08-02_18:00:00;18.4;78947 -2001-08-02_19:00:00;17.8;78947 -2001-08-02_20:00:00;16.6;78947 -2001-08-02_21:00:00;15.6;78947 -2001-08-02_22:00:00;15.6;78947 -2001-08-02_23:00:00;15.7;78947 -2001-08-03_00:00:00;15.6;78947 -2001-08-03_01:00:00;16;78947 -2001-08-03_02:00:00;16.5;78947 -2001-08-03_03:00:00;16.7;78947 -2001-08-03_04:00:00;16.3;78947 -2001-08-03_05:00:00;16.5;78947 -2001-08-03_06:00:00;17.5;78947 -2001-08-03_07:00:00;17;78947 -2001-08-03_09:00:00;17.4;78947 -2001-08-03_10:00:00;18.7;78947 -2001-08-03_11:00:00;20.4;78947 -2001-08-03_12:00:00;21.9;78947 -2001-08-03_13:00:00;21.3;78947 -2001-08-03_14:00:00;21.9;78947 -2001-08-03_15:00:00;21.7;78947 -2001-08-03_16:00:00;19.8;78947 -2001-08-03_17:00:00;17.8;78947 -2001-08-03_18:00:00;19.1;78947 -2001-08-03_19:00:00;18.6;78947 -2001-08-03_20:00:00;18;78947 -2001-08-03_21:00:00;17.7;78947 -2001-08-03_22:00:00;17.7;78947 -2001-08-03_23:00:00;17.8;78947 -2001-08-04_00:00:00;17;78947 -2001-08-04_01:00:00;16.8;78947 -2001-08-04_02:00:00;16.9;78947 -2001-08-04_03:00:00;16.9;78947 -2001-08-04_04:00:00;16.9;78947 -2001-08-04_05:00:00;16.6;78947 -2001-08-04_06:00:00;17.5;78947 -2001-08-04_07:00:00;19;78947 -2001-08-04_09:00:00;20.2;78947 -2001-08-04_10:00:00;19.4;78947 -2001-08-04_11:00:00;20;78947 -2001-08-04_12:00:00;20.3;78947 -2001-08-04_13:00:00;21.3;78947 -2001-08-04_14:00:00;22.2;78947 -2001-08-04_15:00:00;22.1;78947 -2001-08-04_16:00:00;22.5;78947 -2001-08-04_17:00:00;22;78947 
-2001-08-04_18:00:00;18.6;78947 -2001-08-04_19:00:00;17.1;78947 -2001-08-04_20:00:00;16.5;78947 -2001-08-04_21:00:00;16.2;78947 -2001-08-04_22:00:00;16.2;78947 -2001-08-04_23:00:00;16.7;78947 -2001-08-05_00:00:00;16.4;78947 -2001-08-05_01:00:00;16.7;78947 -2001-08-05_02:00:00;16.5;78947 -2001-08-05_03:00:00;16.4;78947 -2001-08-05_04:00:00;16.6;78947 -2001-08-05_05:00:00;16.5;78947 -2001-08-05_06:00:00;16.1;78947 -2001-08-05_07:00:00;16.3;78947 -2001-08-05_09:00:00;16.5;78947 -2001-08-05_10:00:00;16.7;78947 -2001-08-05_11:00:00;16.8;78947 -2001-08-05_12:00:00;17;78947 -2001-08-05_13:00:00;16.9;78947 -2001-08-05_14:00:00;16.6;78947 -2001-08-05_15:00:00;16.3;78947 -2001-08-05_16:00:00;16.1;78947 -2001-08-05_17:00:00;15.9;78947 -2001-08-05_18:00:00;16;78947 -2001-08-05_19:00:00;15.8;78947 -2001-08-05_20:00:00;15.6;78947 -2001-08-05_21:00:00;15.6;78947 -2001-08-05_22:00:00;15.5;78947 -2001-08-05_23:00:00;15.4;78947 -2001-08-06_00:00:00;15;78947 -2001-08-06_01:00:00;14.8;78947 -2001-08-06_02:00:00;14.6;78947 -2001-08-06_03:00:00;14.5;78947 -2001-08-06_04:00:00;14.5;78947 -2001-08-06_05:00:00;14.5;78947 -2001-08-06_06:00:00;14.7;78947 -2001-08-06_08:00:00;14.8;78947 -2001-08-06_09:00:00;15.7;78947 -2001-08-06_10:00:00;16.1;78947 -2001-08-06_11:00:00;15.8;78947 -2001-08-06_12:00:00;16.5;78947 -2001-08-06_13:00:00;15.8;78947 -2001-08-06_14:00:00;16.5;78947 -2001-08-06_15:00:00;16.2;78947 -2001-08-06_16:00:00;16.1;78947 -2001-08-06_17:00:00;15.7;78947 -2001-08-06_18:00:00;14.8;78947 -2001-08-06_19:00:00;14.2;78947 -2001-08-06_20:00:00;13.1;78947 -2001-08-06_21:00:00;12.3;78947 -2001-08-06_22:00:00;11.7;78947 -2001-08-06_23:00:00;11.3;78947 -2001-08-07_00:00:00;11.2;78947 -2001-08-07_01:00:00;11.5;78947 -2001-08-07_02:00:00;11.6;78947 -2001-08-07_03:00:00;11.8;78947 -2001-08-07_04:00:00;11.7;78947 -2001-08-07_05:00:00;12.3;78947 -2001-08-07_06:00:00;13.1;78947 -2001-08-07_07:00:00;15;78947 -2001-08-07_08:00:00;14.6;78947 -2001-08-07_09:00:00;13.8;78947 -2001-08-07_10:00:00;14.3;78947 -2001-08-07_11:00:00;15.5;78947 -2001-08-07_12:00:00;15.6;78947 -2001-08-07_13:00:00;15.5;78947 -2001-08-07_14:00:00;16.2;78947 -2001-08-07_15:00:00;16.5;78947 -2001-08-07_16:00:00;16;78947 -2001-08-07_17:00:00;15.9;78947 -2001-08-07_18:00:00;14.8;78947 -2001-08-07_19:00:00;14.5;78947 -2001-08-07_20:00:00;14.2;78947 -2001-08-07_21:00:00;14.5;78947 -2001-08-07_22:00:00;14.6;78947 -2001-08-07_23:00:00;13.5;78947 -2001-08-08_00:00:00;13.2;78947 -2001-08-08_01:00:00;13.1;78947 -2001-08-08_02:00:00;13.4;78947 -2001-08-08_03:00:00;13.2;78947 -2001-08-08_04:00:00;12.9;78947 -2001-08-08_05:00:00;12.7;78947 -2001-08-08_06:00:00;14.1;78947 -2001-08-08_07:00:00;14.9;78947 -2001-08-08_08:00:00;14.7;78947 -2001-08-08_09:00:00;15.5;78947 -2001-08-08_10:00:00;15;78947 -2001-08-08_11:00:00;16.7;78947 -2001-08-08_12:00:00;14.6;70000 -2001-08-08_13:00:00;14;70000 -2001-08-08_14:00:00;13.8;58927 -2001-08-08_15:00:00;13.6;70000 -2001-08-08_16:00:00;14.1;58927 -2001-08-08_17:00:00;14.5;70000 -2001-08-08_18:00:00;14.7;58927 -2001-08-08_19:00:00;14.9;70000 -2001-08-08_20:00:00;13.7;78947 -2001-08-08_21:00:00;13.8;78947 -2001-08-08_22:00:00;15.8;70000 -2001-08-08_23:00:00;15.9;70000 -2001-08-09_00:00:00;12.5;78947 -2001-08-09_01:00:00;13.1;78947 -2001-08-09_02:00:00;13.1;78947 -2001-08-09_03:00:00;13.2;78947 -2001-08-09_04:00:00;13.5;78947 -2001-08-09_05:00:00;13.7;78947 -2001-08-09_06:00:00;13.8;78947 -2001-08-09_07:00:00;14.7;70000 -2001-08-09_08:00:00;14.7;58927 -2001-08-09_09:00:00;14.7;70000 -2001-08-09_10:00:00;14.6;70000 
-2001-08-09_11:00:00;14.6;70000 -2001-08-09_12:00:00;14.4;70000 -2001-08-09_13:00:00;14.2;70000 -2001-08-09_14:00:00;14.4;70000 -2001-08-09_15:00:00;14.3;70000 -2001-08-09_16:00:00;14.2;70000 -2001-08-09_17:00:00;14;70000 -2001-08-09_18:00:00;13.9;70000 -2001-08-09_19:00:00;12.9;70000 -2001-08-09_20:00:00;13.2;70000 -2001-08-09_21:00:00;13.1;70000 -2001-08-09_22:00:00;13.1;70000 -2001-08-09_23:00:00;13.1;70000 -2001-08-10_00:00:00;12.8;70000 -2001-08-10_01:00:00;11.6;70000 -2001-08-10_02:00:00;12.2;70000 -2001-08-10_03:00:00;12.7;70000 -2001-08-10_04:00:00;12.5;70000 -2001-08-10_05:00:00;12.4;70000 -2001-08-10_06:00:00;12.5;70000 -2001-08-10_07:00:00;12.6;70000 -2001-08-10_08:00:00;12.6;70000 -2001-08-10_09:00:00;11.8;70000 -2001-08-10_10:00:00;12.1;70000 -2001-08-10_11:00:00;12.1;70000 -2001-08-10_12:00:00;12.7;70000 -2001-08-10_13:00:00;13;70000 -2001-08-10_14:00:00;13.1;70000 -2001-08-10_15:00:00;13.3;70000 -2001-08-10_16:00:00;13.4;70000 -2001-08-10_17:00:00;13.5;70000 -2001-08-10_18:00:00;13.5;70000 -2001-08-10_19:00:00;13.6;70000 -2001-08-10_20:00:00;13.8;70000 -2001-08-10_21:00:00;13.9;70000 -2001-08-10_22:00:00;13.9;70000 -2001-08-10_23:00:00;13.7;58927 -2001-08-11_00:00:00;13.5;70000 -2001-08-11_01:00:00;13.4;70000 -2001-08-11_02:00:00;13.1;70000 -2001-08-11_03:00:00;12.5;70000 -2001-08-11_04:00:00;12.7;70000 -2001-08-11_05:00:00;13.2;70000 -2001-08-11_06:00:00;12.9;70000 -2001-08-11_07:00:00;14.3;70000 -2001-08-11_08:00:00;15;70000 -2001-08-11_09:00:00;15.8;70000 -2001-08-11_10:00:00;15.8;58927 -2001-08-11_11:00:00;15.7;70000 -2001-08-11_12:00:00;15.9;70000 -2001-08-11_13:00:00;16;70000 -2001-08-11_14:00:00;16.1;70000 -2001-08-11_15:00:00;16.2;70000 -2001-08-11_16:00:00;16;70000 -2001-08-11_17:00:00;16.1;70000 -2001-08-11_18:00:00;16.1;70000 -2001-08-11_19:00:00;16;70000 -2001-08-11_20:00:00;15.9;70000 -2001-08-11_21:00:00;15.7;70000 -2001-08-11_22:00:00;15.6;70000 -2001-08-11_23:00:00;15.6;70000 -2001-08-12_00:00:00;15.6;70000 -2001-08-12_01:00:00;15.3;70000 -2001-08-12_02:00:00;15.2;70000 -2001-08-12_03:00:00;15.1;70000 -2001-08-12_04:00:00;15;70000 -2001-08-12_05:00:00;14.7;70000 -2001-08-12_06:00:00;14.7;70000 -2001-08-12_07:00:00;14.9;70000 -2001-08-12_08:00:00;14.9;70000 -2001-08-12_09:00:00;14.9;70000 -2001-08-12_10:00:00;15.3;70000 -2001-08-12_11:00:00;15.6;70000 -2001-08-12_12:00:00;15.8;70000 -2001-08-12_13:00:00;15.6;70000 -2001-08-12_14:00:00;15.9;70000 -2001-08-12_15:00:00;16.2;70000 -2001-08-12_16:00:00;16.2;70000 -2001-08-12_17:00:00;16.3;70000 -2001-08-12_18:00:00;16.9;70000 -2001-08-12_19:00:00;16.6;70000 -2001-08-12_20:00:00;15.7;70000 -2001-08-12_21:00:00;16.2;70000 -2001-08-12_22:00:00;16.5;70000 -2001-08-12_23:00:00;16.3;70000 -2001-08-13_00:00:00;16.1;70000 -2001-08-13_01:00:00;16;70000 -2001-08-13_02:00:00;15.9;70000 -2001-08-13_03:00:00;15.7;70000 -2001-08-13_04:00:00;15.8;70000 -2001-08-13_05:00:00;15.5;70000 -2001-08-13_06:00:00;15.3;70000 -2001-08-13_07:00:00;15.6;70000 -2001-08-13_08:00:00;16.2;70000 -2001-08-13_09:00:00;16.6;70000 -2001-08-13_10:00:00;16.9;70000 -2001-08-13_11:00:00;17.2;70000 -2001-08-13_12:00:00;17.5;70000 -2001-08-13_13:00:00;17.4;70000 -2001-08-13_14:00:00;17.6;70000 -2001-08-13_15:00:00;17.2;70000 -2001-08-13_16:00:00;17.1;70000 -2001-08-13_17:00:00;17.2;70000 -2001-08-13_18:00:00;17.2;70000 -2001-08-13_19:00:00;17.9;70000 -2001-08-13_20:00:00;17.5;70000 -2001-08-13_21:00:00;17.3;70000 -2001-08-13_22:00:00;17.2;70000 -2001-08-13_23:00:00;17.1;70000 -2001-08-14_00:00:00;17.1;70000 -2001-08-14_01:00:00;17.2;70000 
-2001-08-14_02:00:00;16.9;70000 -2001-08-14_03:00:00;17.2;70000 -2001-08-14_04:00:00;17;70000 -2001-08-14_05:00:00;16.9;70000 -2001-08-14_06:00:00;16.8;70000 -2001-08-14_07:00:00;17;70000 -2001-08-14_08:00:00;17.3;70000 -2001-08-14_09:00:00;17;70000 -2001-08-14_10:00:00;17.1;70000 -2001-08-14_11:00:00;17.3;70000 -2001-08-14_12:00:00;17.1;70000 -2001-08-14_13:00:00;17;70000 -2001-08-14_14:00:00;17.1;70000 -2001-08-14_15:00:00;16.9;70000 -2001-08-14_16:00:00;16.7;70000 -2001-08-14_17:00:00;16.5;70000 -2001-08-14_18:00:00;16.3;70000 -2001-08-14_19:00:00;16.2;70000 -2001-08-14_20:00:00;16.2;70000 -2001-08-14_21:00:00;16.2;70000 -2001-08-14_22:00:00;15.9;70000 -2001-08-14_23:00:00;16;70000 -2001-08-15_00:00:00;15.4;70000 -2001-08-15_01:00:00;15.3;70000 -2001-08-15_02:00:00;15.3;70000 -2001-08-15_03:00:00;15.3;70000 -2001-08-15_04:00:00;14.9;70000 -2001-08-15_05:00:00;15;70000 -2001-08-15_06:00:00;15;70000 -2001-08-15_07:00:00;15.2;70000 -2001-08-15_08:00:00;15.1;70000 -2001-08-15_09:00:00;15.4;70000 -2001-08-15_10:00:00;15.3;70000 -2001-08-15_11:00:00;15.3;70000 -2001-08-15_12:00:00;15.6;70000 -2001-08-15_13:00:00;15.6;70000 -2001-08-15_14:00:00;15.5;70000 -2001-08-15_15:00:00;15.5;70000 -2001-08-15_16:00:00;15.5;70000 -2001-08-15_17:00:00;15.5;70000 -2001-08-15_18:00:00;15.3;70000 -2001-08-15_19:00:00;15.3;70000 -2001-08-15_20:00:00;15.2;70000 -2001-08-15_21:00:00;15.2;70000 -2001-08-15_22:00:00;15.1;70000 -2001-08-15_23:00:00;15;70000 -2001-08-16_00:00:00;15;70000 -2001-08-16_01:00:00;14.8;70000 -2001-08-16_02:00:00;14.8;70000 -2001-08-16_03:00:00;14.8;70000 -2001-08-16_04:00:00;14.6;70000 -2001-08-16_05:00:00;14.6;70000 -2001-08-16_06:00:00;14.5;70000 -2001-08-16_07:00:00;14.6;70000 -2001-08-16_08:00:00;14.5;70000 -2001-08-16_09:00:00;14.5;70000 -2001-08-16_10:00:00;14.8;70000 -2001-08-16_11:00:00;14.9;70000 -2001-08-16_12:00:00;15;70000 -2001-08-16_13:00:00;15.1;70000 -2001-08-16_14:00:00;15.2;70000 -2001-08-16_15:00:00;15.3;70000 -2001-08-16_16:00:00;15.4;70000 -2001-08-16_17:00:00;15.3;70000 -2001-08-16_18:00:00;15.4;70000 -2001-08-16_19:00:00;15.4;70000 -2001-08-16_20:00:00;15.5;70000 -2001-08-16_21:00:00;15.6;70000 -2001-08-16_22:00:00;15.7;70000 -2001-08-16_23:00:00;15.3;70000 -2001-08-17_00:00:00;15.2;70000 -2001-08-17_01:00:00;14.9;70000 -2001-08-17_02:00:00;14.6;70000 -2001-08-17_03:00:00;14.6;70000 -2001-08-17_04:00:00;14.3;70000 -2001-08-17_05:00:00;14.1;70000 -2001-08-17_06:00:00;14.3;70000 -2001-08-17_07:00:00;14.5;70000 -2001-08-17_08:00:00;14.8;70000 -2001-08-17_09:00:00;15.1;70000 -2001-08-17_10:00:00;15.4;70000 -2001-08-17_11:00:00;15.8;70000 -2001-08-17_12:00:00;16;70000 -2001-08-17_13:00:00;16.1;70000 -2001-08-17_14:00:00;16.3;70000 -2001-08-17_15:00:00;16.5;70000 -2001-08-17_16:00:00;16.7;70000 -2001-08-17_17:00:00;16.5;70000 -2001-08-17_18:00:00;16.5;70000 -2001-08-17_19:00:00;16.3;70000 -2001-08-17_20:00:00;15.9;70000 -2001-08-17_21:00:00;15.9;70000 -2001-08-17_22:00:00;15.8;70000 -2001-08-17_23:00:00;15.6;70000 -2001-08-18_00:00:00;15.5;70000 -2001-08-18_01:00:00;15.4;70000 -2001-08-18_02:00:00;15.3;70000 -2001-08-18_03:00:00;15.2;70000 -2001-08-18_04:00:00;15.2;70000 -2001-08-18_05:00:00;15;70000 -2001-08-18_06:00:00;15;70000 -2001-08-18_07:00:00;15;70000 -2001-08-18_08:00:00;15.2;70000 -2001-08-18_09:00:00;15.2;70000 -2001-08-18_10:00:00;15.2;70000 -2001-08-18_11:00:00;15.2;70000 -2001-08-18_12:00:00;15.2;70000 -2001-08-18_13:00:00;15.2;70203 -2001-08-18_14:00:00;15.3;70000 -2001-08-18_15:00:00;15.3;70000 -2001-08-18_16:00:00;15.3;70000 
-2001-08-18_17:00:00;15.3;70000 -2001-08-18_18:00:00;15.2;70000 -2001-08-18_19:00:00;15.2;70000 -2001-08-18_20:00:00;15.2;70000 -2001-08-18_21:00:00;15.2;70000 -2001-08-18_22:00:00;15.2;70000 -2001-08-18_23:00:00;15.2;70203 -2001-08-19_00:00:00;15.2;70203 -2001-08-19_01:00:00;15.2;70203 -2001-08-19_02:00:00;15.2;70203 -2001-08-19_03:00:00;15;70000 -2001-08-19_04:00:00;15;70000 -2001-08-19_05:00:00;15;70000 -2001-08-19_06:00:00;14.9;70000 -2001-08-19_07:00:00;15.1;70000 -2001-08-19_08:00:00;15;70000 -2001-08-19_09:00:00;15;70000 -2001-08-19_10:00:00;15.1;70000 -2001-08-19_11:00:00;15;70000 -2001-08-19_12:00:00;15.1;70000 -2001-08-19_13:00:00;15.1;70000 -2001-08-19_14:00:00;15.1;70000 -2001-08-19_15:00:00;15;70000 -2001-08-19_16:00:00;15;70000 -2001-08-19_17:00:00;15;70000 -2001-08-19_18:00:00;14.9;70000 -2001-08-19_19:00:00;14.8;70000 -2001-08-19_20:00:00;14.8;70000 -2001-08-19_21:00:00;14.6;70000 -2001-08-19_22:00:00;14.6;70000 -2001-08-19_23:00:00;14.5;70000 -2001-08-20_00:00:00;14.8;70000 -2001-08-20_01:00:00;14.6;70000 -2001-08-20_02:00:00;14.8;70000 -2001-08-20_03:00:00;14.1;70000 -2001-08-20_04:00:00;14.9;70000 -2001-08-20_05:00:00;15.2;70000 -2001-08-20_06:00:00;15.2;70000 -2001-08-20_07:00:00;13.9;70000 -2001-08-20_08:00:00;13.5;70000 -2001-08-20_09:00:00;14.6;70000 -2001-08-20_10:00:00;15.8;70000 -2001-08-20_11:00:00;15.8;70000 -2001-08-20_12:00:00;15.9;70000 -2001-08-20_13:00:00;15.8;70000 -2001-08-20_14:00:00;15.8;70000 -2001-08-20_15:00:00;15.9;70000 -2001-08-20_16:00:00;16;70000 -2001-08-20_17:00:00;16;70000 -2001-08-20_18:00:00;15.8;70000 -2001-08-20_19:00:00;15.6;70000 -2001-08-20_20:00:00;15.5;70000 -2001-08-20_21:00:00;15.5;70000 -2001-08-20_22:00:00;15.4;70000 -2001-08-20_23:00:00;15.3;70000 -2001-08-21_00:00:00;15.4;70000 -2001-08-21_01:00:00;15.4;70000 -2001-08-21_02:00:00;15.5;70000 -2001-08-21_03:00:00;15.6;70000 -2001-08-21_04:00:00;15.8;70000 -2001-08-21_05:00:00;15.4;70000 -2001-08-21_06:00:00;14.7;70000 -2001-08-21_07:00:00;14.9;70000 -2001-08-21_08:00:00;14.9;70000 -2001-08-21_09:00:00;14.4;70000 -2001-08-21_10:00:00;14.9;70000 -2001-08-21_11:00:00;15.9;70000 -2001-08-21_12:00:00;15.8;70000 -2001-08-21_13:00:00;16.1;70000 -2001-08-21_14:00:00;16.1;70000 -2001-08-21_15:00:00;15.9;70000 -2001-08-21_16:00:00;15.8;70000 -2001-08-21_17:00:00;15.8;70000 -2001-08-21_18:00:00;15.7;70000 -2001-08-21_19:00:00;15.7;70000 -2001-08-21_20:00:00;15.6;70000 -2001-08-21_21:00:00;15.5;70000 -2001-08-21_22:00:00;15.3;70000 -2001-08-21_23:00:00;15.2;70000 -2001-08-22_00:00:00;15.2;70000 -2001-08-22_01:00:00;15.3;70000 -2001-08-22_02:00:00;15.4;70000 -2001-08-22_03:00:00;15.4;70000 -2001-08-22_04:00:00;15.2;70000 -2001-08-22_05:00:00;15.2;70000 -2001-08-22_06:00:00;15.5;70000 -2001-08-22_07:00:00;15.5;70000 -2001-08-22_08:00:00;15.5;70000 -2001-08-22_09:00:00;15.5;70000 -2001-08-22_10:00:00;15.4;70000 -2001-08-22_11:00:00;15.3;70000 -2001-08-22_12:00:00;15.2;70000 -2001-08-22_13:00:00;15.2;70000 -2001-08-22_14:00:00;15.2;70000 -2001-08-22_15:00:00;15.2;70000 -2001-08-22_16:00:00;15;70000 -2001-08-22_17:00:00;15.3;70000 -2001-08-22_18:00:00;15.2;70000 -2001-08-22_19:00:00;15.2;70000 -2001-08-22_20:00:00;15.3;70000 -2001-08-22_21:00:00;15.3;70000 -2001-08-22_22:00:00;15;70000 -2001-08-22_23:00:00;15.2;70000 -2001-08-23_00:00:00;15.2;70000 -2001-08-23_01:00:00;15;70000 -2001-08-23_02:00:00;14.9;70000 -2001-08-23_03:00:00;14.9;70000 -2001-08-23_04:00:00;14.8;70000 -2001-08-23_05:00:00;14.9;70000 -2001-08-23_06:00:00;15;70000 -2001-08-23_07:00:00;15.1;70000 -2001-08-23_08:00:00;14.8;70000 
-2001-08-23_09:00:00;15.1;70000 -2001-08-23_10:00:00;15.2;70000 -2001-08-23_11:00:00;15.2;70000 -2001-08-23_12:00:00;15.5;70000 -2001-08-23_13:00:00;15.6;70000 -2001-08-23_14:00:00;15.7;70000 -2001-08-23_15:00:00;15.9;70000 -2001-08-23_16:00:00;16;70000 -2001-08-23_17:00:00;16.2;70000 -2001-08-23_18:00:00;16.3;70000 -2001-08-23_19:00:00;16.3;70000 -2001-08-23_20:00:00;16.5;70000 -2001-08-23_21:00:00;16.5;70000 -2001-08-23_22:00:00;16.9;70000 -2001-08-23_23:00:00;16.6;70000 -2001-08-24_00:00:00;16.5;70000 -2001-08-24_01:00:00;16.2;70000 -2001-08-24_02:00:00;16.6;70000 -2001-08-24_03:00:00;16;70000 -2001-08-24_04:00:00;16.6;70000 -2001-08-24_05:00:00;16.7;70000 -2001-08-24_06:00:00;15.8;70000 -2001-08-24_07:00:00;14.9;70000 -2001-08-24_08:00:00;14.6;70000 -2001-08-24_09:00:00;15.2;70000 -2001-08-24_10:00:00;15.7;70000 -2001-08-24_11:00:00;15.7;70000 -2001-08-24_12:00:00;15.8;70000 -2001-08-24_13:00:00;16.3;70000 -2001-08-24_14:00:00;17.2;70000 -2001-08-24_15:00:00;17.3;70000 -2001-08-24_16:00:00;17.4;70000 -2001-08-24_17:00:00;17.4;70000 -2001-08-24_18:00:00;15.8;70000 -2001-08-24_19:00:00;15.4;70000 -2001-08-24_20:00:00;15.3;70000 -2001-08-24_21:00:00;15;70000 -2001-08-24_22:00:00;15.5;70000 -2001-08-24_23:00:00;15.5;70000 -2001-08-25_00:00:00;15.5;70000 -2001-08-25_01:00:00;15.3;70000 -2001-08-25_02:00:00;15.3;70000 -2001-08-25_03:00:00;15.3;70000 -2001-08-25_04:00:00;15.3;70000 -2001-08-25_05:00:00;15.3;70000 -2001-08-25_06:00:00;15.3;70203 -2001-08-25_07:00:00;15.3;70203 -2001-08-25_08:00:00;15.6;70000 -2001-08-25_09:00:00;15.7;70000 -2001-08-25_10:00:00;15.8;70000 -2001-08-25_11:00:00;15.8;70000 -2001-08-25_12:00:00;15.8;70000 -2001-08-25_13:00:00;15.9;70000 -2001-08-25_14:00:00;16.1;70000 -2001-08-25_15:00:00;16.2;70000 -2001-08-25_16:00:00;16.3;70000 -2001-08-25_17:00:00;16.2;70000 -2001-08-25_18:00:00;16.2;70000 -2001-08-25_19:00:00;16.3;70000 -2001-08-25_20:00:00;16.3;70000 -2001-08-25_21:00:00;16.3;70000 -2001-08-25_22:00:00;16.3;70000 -2001-08-25_23:00:00;16.1;70000 -2001-08-26_00:00:00;15.8;70000 -2001-08-26_01:00:00;16.4;70000 -2001-08-26_02:00:00;16.4;70000 -2001-08-26_03:00:00;16.5;70000 -2001-08-26_04:00:00;16.5;70000 -2001-08-26_05:00:00;16.2;70000 -2001-08-26_06:00:00;16.3;70000 -2001-08-26_07:00:00;16.5;70000 -2001-08-26_08:00:00;16.4;70000 -2001-08-26_09:00:00;16.6;70000 -2001-08-26_10:00:00;16.9;70000 -2001-08-26_11:00:00;17.1;70000 -2001-08-26_12:00:00;16.6;70000 -2001-08-26_13:00:00;16.2;70000 -2001-08-26_14:00:00;16.7;70000 -2001-08-26_15:00:00;17.3;70000 -2001-08-26_16:00:00;17.7;70000 -2001-08-26_17:00:00;16.4;70000 -2001-08-26_18:00:00;17.9;70000 -2001-08-26_19:00:00;18.1;70000 -2001-08-26_20:00:00;18.5;70000 -2001-08-26_21:00:00;19;70000 -2001-08-26_22:00:00;18.4;70000 -2001-08-26_23:00:00;18.4;70000 -2001-08-27_00:00:00;18.1;70000 -2001-08-27_01:00:00;17.8;70000 -2001-08-27_02:00:00;17.7;70000 -2001-08-27_03:00:00;17.6;70000 -2001-08-27_04:00:00;17.5;70000 -2001-08-27_05:00:00;17.5;70000 -2001-08-27_06:00:00;17.6;70000 -2001-08-27_07:00:00;17.1;70000 -2001-08-27_08:00:00;16.9;70000 -2001-08-27_09:00:00;16.7;70000 -2001-08-27_10:00:00;15;78947 -2001-08-27_11:00:00;15;78947 -2001-08-27_12:00:00;18.1;78947 -2001-08-27_13:00:00;18.1;78947 -2001-08-27_14:00:00;16.5;78947 -2001-08-27_15:00:00;16.9;78947 -2001-08-27_16:00:00;15.1;78947 -2001-08-27_17:00:00;15.2;78947 -2001-08-27_18:00:00;15.4;78947 -2001-08-27_19:00:00;14.9;78947 -2001-08-27_20:00:00;14.7;78947 -2001-08-27_21:00:00;14.3;78947 -2001-08-27_22:00:00;14.4;78947 -2001-08-27_23:00:00;14.1;78947 
-2001-08-28_00:00:00;12.8;78947 -2001-08-28_01:00:00;12.7;78947 -2001-08-28_02:00:00;12.6;78947 -2001-08-28_03:00:00;12.4;78947 -2001-08-28_04:00:00;12.6;78947 -2001-08-28_05:00:00;11.9;78947 -2001-08-28_06:00:00;12.6;78947 -2001-08-28_07:00:00;12.7;78947 -2001-08-28_08:00:00;13.5;78947 -2001-08-28_09:00:00;12.8;78947 -2001-08-28_10:00:00;12.9;78947 -2001-08-28_11:00:00;13.2;78947 -2001-08-28_12:00:00;14.4;78947 -2001-08-28_13:00:00;13;78947 -2001-08-28_14:00:00;14.1;78947 -2001-08-28_15:00:00;13.2;78947 -2001-08-28_16:00:00;13;78947 -2001-08-28_17:00:00;12.7;78947 -2001-08-28_18:00:00;12.8;78947 -2001-08-28_19:00:00;13;78947 -2001-08-28_20:00:00;13.1;78947 -2001-08-28_21:00:00;12.9;78947 -2001-08-28_22:00:00;13;78947 -2001-08-28_23:00:00;13.4;78947 -2001-08-29_00:00:00;14.1;78947 -2001-08-29_01:00:00;13.3;78947 -2001-08-29_02:00:00;13.6;78947 -2001-08-29_03:00:00;13.8;78947 -2001-08-29_04:00:00;13.8;78947 -2001-08-29_05:00:00;13.9;78947 -2001-08-29_06:00:00;15.7;70000 -2001-08-29_07:00:00;15.9;70000 -2001-08-29_08:00:00;16.1;70000 -2001-08-29_09:00:00;16.1;70000 -2001-08-29_10:00:00;15.8;70000 -2001-08-29_11:00:00;15.4;70000 -2001-08-29_12:00:00;15.3;70000 -2001-08-29_13:00:00;15.7;70000 -2001-08-29_14:00:00;15.8;70000 -2001-08-29_15:00:00;15;70000 -2001-08-29_16:00:00;14.8;70000 -2001-08-29_17:00:00;14.5;70000 -2001-08-29_18:00:00;13.2;70000 -2001-08-29_19:00:00;13.6;70000 -2001-08-29_20:00:00;13.4;70000 -2001-08-29_21:00:00;13.3;70000 -2001-08-29_22:00:00;13.5;70000 -2001-08-29_23:00:00;13.2;70000 -2001-08-30_00:00:00;13.5;70000 -2001-08-30_01:00:00;13.3;70000 -2001-08-30_02:00:00;13.2;70000 -2001-08-30_03:00:00;13.6;70000 -2001-08-30_04:00:00;13.5;70000 -2001-08-30_05:00:00;13.5;70000 -2001-08-30_06:00:00;13.5;70000 -2001-08-30_07:00:00;13.6;70000 -2001-08-30_08:00:00;13.5;70000 -2001-08-30_09:00:00;13.5;70000 -2001-08-30_10:00:00;13.1;70000 -2001-08-30_11:00:00;13.2;70000 -2001-08-30_12:00:00;13.2;70000 -2001-08-30_13:00:00;13.4;70000 -2001-08-30_14:00:00;13.2;70000 -2001-08-30_15:00:00;13;70000 -2001-08-30_16:00:00;12.6;70000 -2001-08-30_17:00:00;12.8;70000 -2001-08-30_18:00:00;13.3;70000 -2001-08-30_19:00:00;14;70000 -2001-08-30_20:00:00;14.7;70000 -2001-08-30_21:00:00;14.8;70000 -2001-08-30_22:00:00;14.7;70000 -2001-08-30_23:00:00;14.6;70000 -2001-08-31_00:00:00;14.7;70000 -2001-08-31_01:00:00;14.6;70000 -2001-08-31_02:00:00;14.3;70000 -2001-08-31_03:00:00;14.4;70000 -2001-08-31_04:00:00;14;70000 -2001-08-31_05:00:00;14.2;70000 -2001-08-31_06:00:00;13.6;70000 -2001-08-31_07:00:00;14.5;70000 -2001-08-31_08:00:00;14.6;70000 -2001-08-31_09:00:00;14.3;70000 -2001-08-31_10:00:00;14.2;70000 -2001-08-31_11:00:00;14.5;70000 -2001-08-31_12:00:00;14.3;70000 -2001-08-31_13:00:00;14.2;70000 -2001-08-31_14:00:00;14.3;70000 -2001-08-31_15:00:00;14.3;70000 -2001-08-31_16:00:00;14.2;70000 -2001-08-31_17:00:00;14.3;70000 -2001-08-31_18:00:00;14.2;70000 -2001-08-31_19:00:00;14.1;70000 -2001-08-31_20:00:00;14.2;70000 -2001-08-31_21:00:00;14.3;70000 -2001-08-31_22:00:00;14.2;70000 -2001-08-31_23:00:00;14.3;70000 -2001-09-01_00:00:00;14.6;70000 -2001-09-01_01:00:00;14.8;70000 -2001-09-01_02:00:00;10.5;78947 -2001-09-01_03:00:00;9.7;78947 -2001-09-01_04:00:00;9.2;78947 -2001-09-01_05:00:00;8.9;78947 -2001-09-01_06:00:00;14.9;70000 -2001-09-01_07:00:00;14.8;70000 -2001-09-01_08:00:00;14.8;70000 -2001-09-01_09:00:00;14.6;70000 -2001-09-01_10:00:00;14.7;70000 -2001-09-01_11:00:00;14.9;70000 -2001-09-01_12:00:00;14.8;70000 -2001-09-01_13:00:00;14.4;70000 -2001-09-01_14:00:00;14.1;70000 
-2001-09-01_15:00:00;14.1;70000 -2001-09-01_16:00:00;14.3;70000 -2001-09-01_17:00:00;14.3;70000 -2001-09-01_18:00:00;14.2;70000 -2001-09-01_19:00:00;14.1;70000 -2001-09-01_20:00:00;13.9;70000 -2001-09-01_21:00:00;13.9;70000 -2001-09-01_22:00:00;13.8;70000 -2001-09-01_23:00:00;13.6;70000 -2001-09-02_00:00:00;13.6;70000 -2001-09-02_01:00:00;13.3;70000 -2001-09-02_02:00:00;13.2;70000 -2001-09-02_03:00:00;13.2;70000 -2001-09-02_04:00:00;13.1;70000 -2001-09-02_05:00:00;13.1;70000 -2001-09-02_06:00:00;13.2;70000 -2001-09-02_07:00:00;13;70000 -2001-09-02_08:00:00;13.1;70000 -2001-09-02_09:00:00;13.4;70000 -2001-09-02_10:00:00;13.6;70000 -2001-09-02_11:00:00;13.7;70000 -2001-09-02_12:00:00;13.8;70000 -2001-09-02_13:00:00;13.9;70000 -2001-09-02_14:00:00;13.5;70000 -2001-09-02_15:00:00;13.6;70000 -2001-09-02_16:00:00;14;70000 -2001-09-02_17:00:00;13.6;70000 -2001-09-02_18:00:00;13.6;70000 -2001-09-02_19:00:00;13.9;70000 -2001-09-02_20:00:00;14.3;70000 -2001-09-02_21:00:00;14.6;70000 -2001-09-02_22:00:00;14.6;70000 -2001-09-02_23:00:00;14.9;70000 -2001-09-03_00:00:00;14.5;70000 -2001-09-03_01:00:00;14.6;70000 -2001-09-03_02:00:00;14.8;70000 -2001-09-03_03:00:00;14.7;70000 -2001-09-03_04:00:00;14.8;70000 -2001-09-03_05:00:00;14.9;70000 -2001-09-03_06:00:00;15.1;70000 -2001-09-03_07:00:00;15.1;70000 -2001-09-03_08:00:00;15.2;70000 -2001-09-03_09:00:00;15.5;70000 -2001-09-03_10:00:00;15.8;70000 -2001-09-03_11:00:00;15.8;70000 -2001-09-03_12:00:00;15.9;70000 -2001-09-03_13:00:00;16.2;70000 -2001-09-03_14:00:00;16.2;70000 -2001-09-03_15:00:00;16.2;70000 -2001-09-03_16:00:00;16.1;70000 -2001-09-03_17:00:00;16.1;70000 -2001-09-03_18:00:00;16.2;70000 -2001-09-03_19:00:00;16.3;70000 -2001-09-03_20:00:00;16.4;70000 -2001-09-03_21:00:00;16.4;70000 -2001-09-03_22:00:00;16.1;70000 -2001-09-03_23:00:00;16.2;70000 -2001-09-04_00:00:00;15.9;70000 -2001-09-04_01:00:00;15.6;70000 -2001-09-04_02:00:00;15.3;70000 -2001-09-04_03:00:00;15.3;70000 -2001-09-04_04:00:00;15.6;70000 -2001-09-04_05:00:00;15.6;70000 -2001-09-04_06:00:00;15.4;70000 -2001-09-04_07:00:00;15.4;70000 -2001-09-04_08:00:00;15.3;70000 -2001-09-04_09:00:00;15.4;70000 -2001-09-04_10:00:00;15.5;70000 -2001-09-04_11:00:00;15.8;70000 -2001-09-04_12:00:00;16;70000 -2001-09-04_13:00:00;15.3;70000 -2001-09-04_14:00:00;15.3;70000 -2001-09-04_15:00:00;15.3;70000 -2001-09-04_16:00:00;15.3;70000 -2001-09-04_17:00:00;15.3;70000 -2001-09-04_18:00:00;15.3;70203 -2001-09-04_19:00:00;15.3;70203 -2001-09-04_20:00:00;15;70000 -2001-09-04_21:00:00;15.4;70000 -2001-09-04_22:00:00;15.4;70000 -2001-09-04_23:00:00;15.6;70000 -2001-09-05_00:00:00;15.6;70000 -2001-09-05_01:00:00;15.4;70000 -2001-09-05_02:00:00;15.7;70000 -2001-09-05_03:00:00;15.8;70000 -2001-09-05_04:00:00;15.9;70000 -2001-09-05_05:00:00;15.9;70000 -2001-09-05_06:00:00;16;70000 -2001-09-05_07:00:00;16;70000 -2001-09-05_08:00:00;16.1;70000 -2001-09-05_09:00:00;16.2;70000 -2001-09-05_10:00:00;15.9;70000 -2001-09-05_11:00:00;15.9;70000 -2001-09-05_12:00:00;15.7;70000 -2001-09-05_13:00:00;15.5;70000 -2001-09-05_14:00:00;15;70000 -2001-09-05_15:00:00;14.7;70000 -2001-09-05_16:00:00;14.9;70000 -2001-09-05_17:00:00;14.8;70000 -2001-09-05_18:00:00;14.6;70000 -2001-09-05_19:00:00;14.5;70000 -2001-09-05_20:00:00;14.6;70000 -2001-09-05_21:00:00;14.8;70000 -2001-09-05_22:00:00;14.3;70000 -2001-09-05_23:00:00;14.2;70000 -2001-09-06_00:00:00;14.2;70000 -2001-09-06_01:00:00;14.2;70000 -2001-09-06_02:00:00;14.1;70000 -2001-09-06_03:00:00;14.1;70000 -2001-09-06_04:00:00;14.1;70000 -2001-09-06_05:00:00;14;70000 
-2001-09-06_06:00:00;14.1;70000 -2001-09-06_07:00:00;14.3;70000 -2001-09-06_08:00:00;14.6;70000 -2001-09-06_09:00:00;14.9;70000 -2001-09-06_10:00:00;13.8;70000 -2001-09-06_11:00:00;14.3;70000 -2001-09-06_12:00:00;14.8;70000 -2001-09-06_13:00:00;14.9;70000 -2001-09-06_14:00:00;14.9;70000 -2001-09-06_15:00:00;14.9;70000 -2001-09-06_16:00:00;14.9;70000 -2001-09-06_17:00:00;14.8;70000 -2001-09-06_18:00:00;14.7;70000 -2001-09-06_19:00:00;14.6;70000 -2001-09-06_20:00:00;14.6;70000 -2001-09-06_21:00:00;14.5;70000 -2001-09-06_22:00:00;14.7;70000 -2001-09-06_23:00:00;14.7;70000 -2001-09-07_00:00:00;14.6;70000 -2001-09-07_01:00:00;14.6;70000 -2001-09-07_02:00:00;14.5;70000 -2001-09-07_03:00:00;14.3;70000 -2001-09-07_04:00:00;14.6;70000 -2001-09-07_05:00:00;14;70000 -2001-09-07_06:00:00;13.6;70000 -2001-09-07_07:00:00;13.7;70000 -2001-09-07_08:00:00;12.9;70000 -2001-09-07_09:00:00;13.6;70000 -2001-09-07_10:00:00;13.4;70000 -2001-09-07_11:00:00;13.2;70000 -2001-09-07_12:00:00;12.3;70000 -2001-09-07_13:00:00;13.6;70000 -2001-09-07_14:00:00;13.6;70000 -2001-09-07_15:00:00;13.3;70000 -2001-09-07_16:00:00;13.5;70000 -2001-09-07_17:00:00;13.6;70000 -2001-09-07_18:00:00;13.5;70000 -2001-09-07_19:00:00;13.4;70000 -2001-09-07_20:00:00;13.5;70000 -2001-09-07_21:00:00;13.1;70000 -2001-09-07_22:00:00;12.5;70000 -2001-09-07_23:00:00;12.5;70000 -2001-09-08_00:00:00;12.5;70000 -2001-09-08_01:00:00;12.8;70000 -2001-09-08_02:00:00;11.8;70000 -2001-09-08_03:00:00;12.5;70000 -2001-09-08_04:00:00;13.1;70000 -2001-09-08_05:00:00;13.2;70000 -2001-09-08_06:00:00;12.6;70000 -2001-09-08_07:00:00;12.9;70000 -2001-09-08_08:00:00;13.5;70000 -2001-09-08_09:00:00;13.5;70000 -2001-09-08_10:00:00;13.4;70000 -2001-09-08_11:00:00;13.6;70000 -2001-09-08_12:00:00;12.5;70000 -2001-09-08_13:00:00;13.2;70000 -2001-09-08_14:00:00;13.5;70000 -2001-09-08_15:00:00;13.6;70000 -2001-09-08_16:00:00;13.6;70000 -2001-09-08_17:00:00;13.6;70000 -2001-09-08_18:00:00;13.6;70000 -2001-09-08_19:00:00;13.6;70000 -2001-09-08_20:00:00;13.6;70203 -2001-09-08_21:00:00;13.7;70000 -2001-09-08_22:00:00;13.6;70000 -2001-09-08_23:00:00;13.6;70000 -2001-09-09_00:00:00;13.8;70000 -2001-09-09_01:00:00;13.6;70000 -2001-09-09_02:00:00;13.5;70000 -2001-09-09_03:00:00;13.6;70000 -2001-09-09_04:00:00;13.5;70000 -2001-09-09_05:00:00;12.3;70000 -2001-09-09_06:00:00;13.3;70000 -2001-09-09_07:00:00;13.2;70000 -2001-09-09_08:00:00;13.2;70000 -2001-09-09_09:00:00;13.3;70000 -2001-09-09_10:00:00;13.4;58927 -2001-09-09_11:00:00;13.5;70000 -2001-09-09_12:00:00;13.6;70000 -2001-09-09_13:00:00;13.6;70000 -2001-09-09_14:00:00;13.6;70000 -2001-09-09_15:00:00;13.6;70000 -2001-09-09_16:00:00;13.7;70000 -2001-09-09_17:00:00;13.8;70000 -2001-09-09_18:00:00;12.1;70000 -2001-09-09_19:00:00;13.3;70000 -2001-09-09_20:00:00;13.1;70000 -2001-09-09_21:00:00;12.9;70000 -2001-09-09_22:00:00;11.8;70000 -2001-09-09_23:00:00;11.6;70000 -2001-09-10_00:00:00;11.8;70000 -2001-09-10_01:00:00;11.5;70000 -2001-09-10_02:00:00;11.6;70000 -2001-09-10_03:00:00;11.9;70000 -2001-09-10_04:00:00;12.4;70000 -2001-09-10_05:00:00;12.5;70000 -2001-09-10_06:00:00;12.9;70000 -2001-09-10_07:00:00;12.8;70000 -2001-09-10_08:00:00;13;70000 -2001-09-10_09:00:00;14.6;70000 -2001-09-10_10:00:00;14.8;70000 -2001-09-10_11:00:00;15;70000 -2001-09-10_12:00:00;15.1;70000 -2001-09-10_13:00:00;15.3;70000 -2001-09-10_14:00:00;15.3;70000 -2001-09-10_15:00:00;15.2;70000 -2001-09-10_16:00:00;15.6;70000 -2001-09-10_17:00:00;15.4;70000 -2001-09-10_18:00:00;15.4;70000 -2001-09-10_19:00:00;15;70000 -2001-09-10_20:00:00;15.6;70000 
-2001-09-10_21:00:00;16.4;70000 -2001-09-10_22:00:00;16.6;70000 -2001-09-10_23:00:00;16.4;70000 -2001-09-11_00:00:00;16.3;70000 -2001-09-11_01:00:00;16.1;70000 -2001-09-11_02:00:00;15.9;70000 -2001-09-11_03:00:00;15.9;70000 -2001-09-11_04:00:00;15.9;70000 -2001-09-11_05:00:00;15.6;70000 -2001-09-11_06:00:00;15.6;70000 -2001-09-11_07:00:00;15.4;70000 -2001-09-11_08:00:00;15.7;70000 -2001-09-11_09:00:00;15.7;70000 -2001-09-11_10:00:00;15.6;70000 -2001-09-11_11:00:00;15.3;70000 -2001-09-11_12:00:00;15.2;70000 -2001-09-11_13:00:00;15.2;70000 -2001-09-11_14:00:00;15.2;70000 -2001-09-11_15:00:00;15.3;70000 -2001-09-11_16:00:00;15.4;70000 -2001-09-11_17:00:00;15.5;70000 -2001-09-11_18:00:00;15.4;70000 -2001-09-11_19:00:00;15.1;70000 -2001-09-11_20:00:00;15;70000 -2001-09-11_21:00:00;14.9;70000 -2001-09-11_22:00:00;14.9;70000 -2001-09-11_23:00:00;15;70000 -2001-09-12_00:00:00;15.1;70000 -2001-09-12_01:00:00;14.9;70000 -2001-09-12_02:00:00;14.3;70000 -2001-09-12_03:00:00;13.6;70000 -2001-09-12_04:00:00;14.2;70000 -2001-09-12_05:00:00;14.3;70000 -2001-09-12_06:00:00;15;70000 -2001-09-12_07:00:00;14.9;70000 -2001-09-12_08:00:00;15.1;70000 -2001-09-12_09:00:00;14.9;70000 -2001-09-12_10:00:00;14.8;70000 -2001-09-12_11:00:00;14.8;70000 -2001-09-12_12:00:00;13.6;70000 -2001-09-12_13:00:00;13.3;70000 -2001-09-12_14:00:00;13.6;70000 -2001-09-12_15:00:00;14.1;70000 -2001-09-12_16:00:00;15;70000 -2001-09-12_17:00:00;15.2;70000 -2001-09-12_18:00:00;14.2;70000 -2001-09-12_19:00:00;14.2;70000 -2001-09-12_20:00:00;14.1;70000 -2001-09-12_21:00:00;14.2;70000 -2001-09-12_22:00:00;14.2;70000 -2001-09-12_23:00:00;14.2;70000 -2001-09-13_00:00:00;14.3;70000 -2001-09-13_01:00:00;14.4;70000 -2001-09-13_02:00:00;14.2;70000 -2001-09-13_03:00:00;14.3;70000 -2001-09-13_04:00:00;14.2;70000 -2001-09-13_05:00:00;13.9;70000 -2001-09-13_06:00:00;13.9;70000 -2001-09-13_07:00:00;13.9;70000 -2001-09-13_08:00:00;14.2;70000 -2001-09-13_09:00:00;14.2;70000 -2001-09-13_10:00:00;14.1;70000 -2001-09-13_11:00:00;13.8;70000 -2001-09-13_12:00:00;13.5;70000 -2001-09-13_13:00:00;14.2;70000 -2001-09-13_14:00:00;14;70000 -2001-09-13_15:00:00;14.1;70000 -2001-09-13_16:00:00;13.6;70000 -2001-09-13_17:00:00;13.9;70000 -2001-09-13_18:00:00;13.5;70000 -2001-09-13_19:00:00;13.9;70000 -2001-09-13_20:00:00;14.1;70000 -2001-09-13_21:00:00;14.1;70000 -2001-09-13_22:00:00;13.9;70000 -2001-09-13_23:00:00;13.9;70000 -2001-09-14_00:00:00;13.9;70000 -2001-09-14_01:00:00;14;70000 -2001-09-14_02:00:00;14;70000 -2001-09-14_03:00:00;13.9;70000 -2001-09-14_04:00:00;14.1;70000 -2001-09-14_05:00:00;14.1;70000 -2001-09-14_06:00:00;14.1;70000 -2001-09-14_07:00:00;13.9;70000 -2001-09-14_08:00:00;13.8;70000 -2001-09-14_09:00:00;13.3;70000 -2001-09-14_10:00:00;13.2;70000 -2001-09-14_11:00:00;13.2;70000 -2001-09-14_12:00:00;12.9;70000 -2001-09-14_13:00:00;12.9;70000 -2001-09-14_14:00:00;12.9;70000 -2001-09-14_15:00:00;12.9;70000 -2001-09-14_16:00:00;12.7;70000 -2001-09-14_17:00:00;12.8;70000 -2001-09-14_18:00:00;12.5;70000 -2001-09-14_19:00:00;12.6;70000 -2001-09-14_20:00:00;12.6;70000 -2001-09-14_21:00:00;12;70000 -2001-09-14_22:00:00;11.8;70000 -2001-09-14_23:00:00;12.9;70000 -2001-09-15_00:00:00;13.3;70000 -2001-09-15_01:00:00;12.8;70000 -2001-09-15_02:00:00;11.9;70000 -2001-09-15_03:00:00;12.6;70000 -2001-09-15_04:00:00;13.2;70000 -2001-09-15_05:00:00;13.2;70000 -2001-09-15_06:00:00;12.9;70000 -2001-09-15_07:00:00;12.8;70000 -2001-09-15_08:00:00;12.5;70000 -2001-09-15_09:00:00;11.4;70000 -2001-09-15_10:00:00;12.7;70000 -2001-09-15_11:00:00;11.1;70000 
-2001-09-15_12:00:00;12.2;70000 -2001-09-15_13:00:00;12.1;70000 -2001-09-15_14:00:00;12.1;70000 -2001-09-15_15:00:00;12.2;70000 -2001-09-15_16:00:00;12.2;70000 -2001-09-15_17:00:00;12.2;70000 -2001-09-15_18:00:00;12.2;70000 -2001-09-15_19:00:00;12.2;70000 -2001-09-15_20:00:00;12.1;70000 -2001-09-15_21:00:00;12.1;70000 -2001-09-15_22:00:00;12.1;70000 -2001-09-15_23:00:00;12.2;70000 -2001-09-16_00:00:00;11.2;70000 -2001-09-16_01:00:00;11.6;70000 -2001-09-16_02:00:00;11.1;70000 -2001-09-16_03:00:00;11.5;70000 -2001-09-16_04:00:00;11.3;70000 -2001-09-16_05:00:00;11.5;70000 -2001-09-16_06:00:00;11.2;70000 -2001-09-16_07:00:00;11.5;70000 -2001-09-16_08:00:00;11.2;70000 -2001-09-16_09:00:00;11.4;70000 -2001-09-16_10:00:00;11.3;70000 -2001-09-16_11:00:00;11.5;70000 -2001-09-16_12:00:00;11.7;70000 -2001-09-16_13:00:00;11.8;70000 -2001-09-16_14:00:00;11.8;70000 -2001-09-16_15:00:00;11.9;70000 -2001-09-16_16:00:00;12.1;70000 -2001-09-16_17:00:00;12.1;70000 -2001-09-16_18:00:00;11.5;70000 -2001-09-16_19:00:00;11.5;70000 -2001-09-16_20:00:00;11.7;70000 -2001-09-16_21:00:00;11.8;70000 -2001-09-16_22:00:00;11.7;70000 -2001-09-16_23:00:00;11.9;70000 -2001-09-17_00:00:00;12;70000 -2001-09-17_01:00:00;11.6;70000 -2001-09-17_02:00:00;11.8;70000 -2001-09-17_03:00:00;11.9;70000 -2001-09-17_04:00:00;12;70000 -2001-09-17_05:00:00;12;70000 -2001-09-17_06:00:00;12.3;70000 -2001-09-17_07:00:00;12.5;70000 -2001-09-17_08:00:00;12.4;70000 -2001-09-17_09:00:00;12.6;70000 -2001-09-17_10:00:00;13.2;70000 -2001-09-17_11:00:00;13.1;70000 -2001-09-17_12:00:00;13.3;70000 -2001-09-17_13:00:00;12.8;70000 -2001-09-17_14:00:00;12.6;70000 -2001-09-17_15:00:00;12.9;70000 -2001-09-17_16:00:00;12.9;70000 -2001-09-17_17:00:00;14;70000 -2001-09-17_18:00:00;14.1;70000 -2001-09-17_19:00:00;14.1;70000 -2001-09-17_20:00:00;14.2;70000 -2001-09-17_21:00:00;14.2;70000 -2001-09-17_22:00:00;14.5;70000 -2001-09-17_23:00:00;14.2;70000 -2001-09-18_00:00:00;14.3;70000 -2001-09-18_01:00:00;14.2;70000 -2001-09-18_02:00:00;14.5;70000 -2001-09-18_03:00:00;14.3;70000 -2001-09-18_04:00:00;12.8;70000 -2001-09-18_05:00:00;12.2;70000 -2001-09-18_06:00:00;12.7;70000 -2001-09-18_07:00:00;12.8;70000 -2001-09-18_08:00:00;12.8;70000 -2001-09-18_09:00:00;13.2;70000 -2001-09-18_10:00:00;13.7;70000 -2001-09-18_11:00:00;14.3;70000 -2001-09-18_12:00:00;14.3;70000 -2001-09-18_13:00:00;14.5;70000 -2001-09-18_14:00:00;14.4;70000 -2001-09-18_15:00:00;14.5;70000 -2001-09-18_16:00:00;14.5;70000 -2001-09-18_17:00:00;14.6;70000 -2001-09-18_18:00:00;14.5;70000 -2001-09-18_19:00:00;14.2;70000 -2001-09-18_20:00:00;13.3;70000 -2001-09-18_21:00:00;13.1;70000 -2001-09-18_22:00:00;12.8;70000 -2001-09-18_23:00:00;12.7;70000 -2001-09-19_00:00:00;12.6;70000 -2001-09-19_01:00:00;12.4;70000 -2001-09-19_02:00:00;12.1;70000 -2001-09-19_03:00:00;11.9;70000 -2001-09-19_04:00:00;12.1;70000 -2001-09-19_05:00:00;11;78947 -2001-09-19_06:00:00;11.6;78947 -2001-09-19_07:00:00;11.7;78947 -2001-09-19_09:00:00;13.1;78947 -2001-09-19_10:00:00;13.5;78947 -2001-09-19_11:00:00;13.7;78947 -2001-09-19_12:00:00;12.2;78947 -2001-09-19_13:00:00;12.5;78947 -2001-09-19_14:00:00;12.2;78947 -2001-09-19_15:00:00;11.9;78947 -2001-09-19_16:00:00;11.8;78947 -2001-09-19_17:00:00;11.8;78947 -2001-09-19_18:00:00;11.6;78947 -2001-09-19_19:00:00;11.7;78947 -2001-09-19_20:00:00;11.8;78947 -2001-09-19_21:00:00;11.7;78947 -2001-09-19_22:00:00;11.7;78947 -2001-09-19_23:00:00;11.5;78947 -2001-09-20_00:00:00;11.4;78947 -2001-09-20_01:00:00;11.7;78947 -2001-09-20_02:00:00;12.2;78947 -2001-09-20_03:00:00;12.7;78947 
-2001-09-20_04:00:00;12.7;78947 -2001-09-20_06:00:00;12.4;78947 -2001-09-20_07:00:00;12.5;78947 -2001-09-20_08:00:00;12.9;78947 -2001-09-20_09:00:00;13.2;78947 -2001-09-20_10:00:00;13.2;78947 -2001-09-20_11:00:00;13.4;78947 -2001-09-20_12:00:00;13.9;78947 -2001-09-20_13:00:00;14.4;78947 -2001-09-20_14:00:00;14.4;78947 -2001-09-20_15:00:00;14;78947 -2001-09-20_16:00:00;13.3;78947 -2001-09-20_17:00:00;12.7;78947 -2001-09-20_18:00:00;11.7;78947 -2001-09-20_19:00:00;11.6;78947 -2001-09-20_20:00:00;11.3;78947 -2001-09-20_21:00:00;11.3;78947 -2001-09-20_22:00:00;11.2;78947 -2001-09-20_23:00:00;11.3;78947 -2001-09-21_00:00:00;10.7;78947 -2001-09-21_01:00:00;10.8;78947 -2001-09-21_02:00:00;10.8;78947 -2001-09-21_03:00:00;11.3;78947 -2001-09-21_04:00:00;11.4;78947 -2001-09-21_05:00:00;11.3;78947 -2001-09-21_06:00:00;11.5;78947 -2001-09-21_07:00:00;11.9;78947 -2001-09-21_09:00:00;13.2;78947 -2001-09-21_10:00:00;13.3;78947 -2001-09-21_11:00:00;12.9;78947 -2001-09-21_12:00:00;13.3;78947 -2001-09-21_13:00:00;13.6;78947 -2001-09-21_14:00:00;12.7;78947 -2001-09-21_15:00:00;11.7;78947 -2001-09-21_16:00:00;12;78947 -2001-09-21_17:00:00;11.7;78947 -2001-09-21_18:00:00;11.1;78947 -2001-09-21_19:00:00;11.3;78947 -2001-09-21_20:00:00;11.4;78947 -2001-09-21_21:00:00;11.4;78947 -2001-09-21_22:00:00;10.8;78947 -2001-09-21_23:00:00;10.9;78947 -2001-09-22_00:00:00;10.9;78947 -2001-09-22_01:00:00;11.2;78947 -2001-09-22_02:00:00;11.2;78947 -2001-09-22_03:00:00;11.1;78947 -2001-09-22_04:00:00;11.2;78947 -2001-09-22_05:00:00;11.1;78947 -2001-09-22_06:00:00;10.6;78947 -2001-09-22_07:00:00;10.7;78947 -2001-09-22_09:00:00;11.3;78947 -2001-09-22_10:00:00;12.3;78947 -2001-09-22_11:00:00;12.9;78947 -2001-09-22_12:00:00;11.7;78947 -2001-09-22_13:00:00;11.8;78947 -2001-09-22_14:00:00;11.6;78947 -2001-09-22_15:00:00;11.7;78947 -2001-09-22_16:00:00;11.6;78947 -2001-09-22_17:00:00;11.6;78947 -2001-09-22_18:00:00;11.2;78947 -2001-09-22_19:00:00;11.1;78947 -2001-09-22_20:00:00;11;78947 -2001-09-22_21:00:00;10.8;78947 -2001-09-22_22:00:00;10.7;78947 -2001-09-22_23:00:00;10.8;78947 -2001-09-23_00:00:00;10.7;78947 -2001-09-23_01:00:00;10.8;78947 -2001-09-23_02:00:00;10.9;78947 -2001-09-23_03:00:00;10.8;78947 -2001-09-23_04:00:00;10.7;78947 -2001-09-23_05:00:00;10.9;78947 -2001-09-23_06:00:00;10.8;78947 -2001-09-23_07:00:00;11;78947 -2001-09-23_08:00:00;11.1;78947 -2001-09-23_09:00:00;11.7;78947 -2001-09-23_10:00:00;12.2;78947 -2001-09-23_11:00:00;12.6;78947 -2001-09-23_12:00:00;11.6;78947 -2001-09-23_13:00:00;12.6;78947 -2001-09-23_14:00:00;12.8;78947 -2001-09-23_15:00:00;12.7;78947 -2001-09-23_16:00:00;12.1;78947 -2001-09-23_17:00:00;11.6;78947 -2001-09-23_18:00:00;10.5;78947 -2001-09-23_19:00:00;10.6;78947 -2001-09-23_20:00:00;11.2;78947 -2001-09-23_21:00:00;11.6;78947 -2001-09-23_22:00:00;11.8;78947 -2001-09-23_23:00:00;12;78947 -2001-09-24_00:00:00;11.8;78947 -2001-09-24_01:00:00;12.1;78947 -2001-09-24_02:00:00;12.2;78947 -2001-09-24_03:00:00;12.4;78947 -2001-09-24_04:00:00;12.5;78947 -2001-09-24_05:00:00;12.7;78947 -2001-09-24_06:00:00;12.5;78947 -2001-09-24_07:00:00;12.8;78947 -2001-09-24_08:00:00;13.4;78947 -2001-09-24_09:00:00;13.6;78947 -2001-09-24_10:00:00;13.9;78947 -2001-09-24_11:00:00;15;78947 -2001-09-24_12:00:00;14.7;78947 -2001-09-24_13:00:00;14.6;78947 -2001-09-24_14:00:00;14.2;78947 -2001-09-24_15:00:00;13.8;78947 -2001-09-24_16:00:00;13.7;78947 -2001-09-24_17:00:00;13.5;78947 -2001-09-24_18:00:00;13.3;78947 -2001-09-24_19:00:00;13.1;78947 -2001-09-24_20:00:00;13;78947 -2001-09-24_21:00:00;12.9;78947 
-2001-09-24_22:00:00;12.9;78947 -2001-09-24_23:00:00;12.8;78947 -2001-09-25_00:00:00;12.4;78947 -2001-09-25_01:00:00;12.4;78947 -2001-09-25_02:00:00;12.4;78947 -2001-09-25_03:00:00;12.4;78947 -2001-09-25_04:00:00;12.4;78947 -2001-09-25_05:00:00;12.5;78947 -2001-09-25_06:00:00;13.1;78947 -2001-09-25_07:00:00;13.3;78947 -2001-09-25_08:00:00;13.7;78947 -2001-09-25_09:00:00;13.8;78947 -2001-09-25_10:00:00;13.9;78947 -2001-09-25_12:00:00;15.2;78947 -2001-09-25_13:00:00;15.5;78947 -2001-09-25_14:00:00;15.8;78947 -2001-09-25_15:00:00;15.2;78947 -2001-09-25_16:00:00;14.5;78947 -2001-09-25_17:00:00;14;78947 -2001-09-25_18:00:00;14.9;78947 -2001-09-25_19:00:00;14.7;78947 -2001-09-25_20:00:00;14.3;78947 -2001-09-25_21:00:00;13.3;78947 -2001-09-25_22:00:00;13.3;78947 -2001-09-25_23:00:00;13.5;78947 -2001-09-26_00:00:00;13.1;78947 -2001-09-26_01:00:00;13.2;78947 -2001-09-26_02:00:00;13.3;78947 -2001-09-26_03:00:00;13.3;78947 -2001-09-26_04:00:00;13.4;78947 -2001-09-26_05:00:00;13.5;78947 -2001-09-26_06:00:00;13.7;78947 -2001-09-26_07:00:00;13.4;78947 -2001-09-26_08:00:00;13.2;78947 -2001-09-26_09:00:00;12.9;78947 -2001-09-26_10:00:00;12.5;78947 -2001-09-26_11:00:00;13.4;78947 -2001-09-26_12:00:00;13.3;78947 -2001-09-26_13:00:00;13.7;78947 -2001-09-26_14:00:00;13.5;78947 -2001-09-26_15:00:00;13;78947 -2001-09-26_16:00:00;12.5;78947 -2001-09-26_17:00:00;12.4;78947 -2001-09-26_18:00:00;12;78947 -2001-09-26_19:00:00;11.7;78947 -2001-09-26_20:00:00;11.5;78947 -2001-09-26_21:00:00;11.4;78947 -2001-09-26_22:00:00;11.3;78947 -2001-09-26_23:00:00;11.1;78947 -2001-09-27_00:00:00;10.8;78947 -2001-09-27_01:00:00;10.5;78947 -2001-09-27_02:00:00;10.1;78947 -2001-09-27_03:00:00;9.8;78947 -2001-09-27_04:00:00;9.7;78947 -2001-09-27_05:00:00;10.1;78947 -2001-09-27_06:00:00;9.5;78947 -2001-09-27_07:00:00;10.9;78947 -2001-09-27_09:00:00;12.5;78947 -2001-09-27_10:00:00;13.2;78947 -2001-09-27_11:00:00;13.9;78947 -2001-09-27_12:00:00;13.8;78947 -2001-09-27_13:00:00;13.7;78947 -2001-09-27_14:00:00;13.5;78947 -2001-09-27_15:00:00;12.9;78947 -2001-09-27_16:00:00;12.6;78947 -2001-09-27_17:00:00;12.4;78947 -2001-09-27_18:00:00;12.2;78947 -2001-09-27_19:00:00;12.3;78947 -2001-09-27_20:00:00;12.5;78947 -2001-09-27_21:00:00;12.7;78947 -2001-09-27_22:00:00;12.8;78947 -2001-09-27_23:00:00;12.9;78947 -2001-09-28_00:00:00;13;78947 -2001-09-28_01:00:00;13.1;78947 -2001-09-28_02:00:00;13.2;78947 -2001-09-28_03:00:00;13.3;78947 -2001-09-28_04:00:00;13.4;78947 -2001-09-28_05:00:00;13.4;78947 -2001-09-28_06:00:00;13.3;78947 -2001-09-28_07:00:00;13.3;78947 -2001-09-28_08:00:00;13.5;78947 -2001-09-28_09:00:00;14.1;78947 -2001-09-28_10:00:00;14.5;78947 -2001-09-28_11:00:00;14.2;78947 -2001-09-28_12:00:00;15.2;78947 -2001-09-28_13:00:00;14.6;78947 -2001-09-28_14:00:00;14.2;78947 -2001-09-28_15:00:00;14;78947 -2001-09-28_16:00:00;14.1;78947 -2001-09-28_17:00:00;14.1;78947 -2001-09-28_18:00:00;14.1;78947 -2001-09-28_19:00:00;14.1;78947 -2001-09-28_20:00:00;14;78947 -2001-09-28_21:00:00;13.9;78947 -2001-09-28_22:00:00;13.9;78947 -2001-09-28_23:00:00;13.8;78947 -2001-09-29_00:00:00;14.1;78947 -2001-09-29_01:00:00;14.1;78947 -2001-09-29_02:00:00;14;78947 -2001-09-29_03:00:00;13.7;78947 -2001-09-29_04:00:00;13.3;78947 -2001-09-29_05:00:00;13;78947 -2001-09-29_06:00:00;12.9;78947 -2001-09-29_07:00:00;13.1;78947 -2001-09-29_09:00:00;14.1;78947 -2001-09-29_10:00:00;14.7;78947 -2001-09-29_11:00:00;15.3;78947 -2001-09-29_12:00:00;14.1;78947 -2001-09-29_13:00:00;14.2;78947 -2001-09-29_14:00:00;14.6;78947 -2001-09-29_15:00:00;14.7;78947 
-2001-09-29_16:00:00;14.5;78947 -2001-09-29_17:00:00;14.3;78947 -2001-09-29_18:00:00;14.1;78947 -2001-09-29_19:00:00;14.1;78947 -2001-09-29_20:00:00;14.1;78947 -2001-09-29_21:00:00;14.1;78947 -2001-09-29_22:00:00;14;78947 -2001-09-29_23:00:00;13.5;78947 -2001-09-30_00:00:00;13.2;78947 -2001-09-30_01:00:00;13;78947 -2001-09-30_02:00:00;12.9;78947 -2001-09-30_03:00:00;12.8;78947 -2001-09-30_04:00:00;12.7;78947 -2001-09-30_05:00:00;12.7;78947 -2001-09-30_06:00:00;12.9;78947 -2001-09-30_07:00:00;13;78947 -2001-09-30_09:00:00;13.9;78947 -2001-09-30_10:00:00;14.7;78947 -2001-09-30_11:00:00;15.4;78947 -2001-09-30_12:00:00;16.7;78947 -2001-09-30_13:00:00;16.7;78947 -2001-09-30_14:00:00;16.6;78947 -2001-09-30_15:00:00;16.4;78947 -2001-09-30_16:00:00;16.1;78947 -2001-09-30_17:00:00;15.3;78947 -2001-09-30_18:00:00;14.9;78947 -2001-09-30_19:00:00;14.8;78947 -2001-09-30_20:00:00;14.9;78947 -2001-09-30_21:00:00;14.8;78947 -2001-09-30_22:00:00;14.7;78947 -2001-09-30_23:00:00;14.7;78947 -2001-10-01_00:00:00;14.1;78947 -2001-10-01_01:00:00;14.2;78947 -2001-10-01_02:00:00;14.2;78947 -2001-10-01_03:00:00;14.1;78947 -2001-10-01_04:00:00;13.8;78947 -2001-10-01_05:00:00;13.6;78947 -2001-10-01_06:00:00;13.4;78947 -2001-10-01_07:00:00;13.8;78947 -2001-10-01_08:00:00;14.6;78947 -2001-10-01_09:00:00;15.5;78947 -2001-10-01_10:00:00;16.2;78947 -2001-10-01_11:00:00;16.7;78947 -2001-10-01_12:00:00;17.7;78947 -2001-10-01_13:00:00;18;78947 -2001-10-01_14:00:00;17.8;78947 -2001-10-01_15:00:00;17.2;78947 -2001-10-01_16:00:00;16.2;78947 -2001-10-01_17:00:00;15.3;78947 -2001-10-01_18:00:00;14.9;78947 -2001-10-01_19:00:00;14.8;78947 -2001-10-01_20:00:00;14.5;78947 -2001-10-01_21:00:00;14.2;78947 -2001-10-01_22:00:00;14.1;78947 -2001-10-01_23:00:00;14;78947 -2001-10-02_00:00:00;14.1;78947 -2001-10-02_01:00:00;13.9;78947 -2001-10-02_02:00:00;13.6;78947 -2001-10-02_03:00:00;13.6;78947 -2001-10-02_04:00:00;13.5;78947 -2001-10-02_05:00:00;13.5;78947 -2001-10-02_06:00:00;13.8;78947 -2001-10-02_07:00:00;13.9;78947 -2001-10-02_08:00:00;14;78947 -2001-10-02_09:00:00;14.4;78947 -2001-10-02_10:00:00;14.4;78947 -2001-10-02_11:00:00;14.1;78947 -2001-10-02_12:00:00;15;78947 -2001-10-02_13:00:00;14.7;78947 -2001-10-02_14:00:00;13.9;78947 -2001-10-02_15:00:00;13.4;78947 -2001-10-02_16:00:00;13;78947 -2001-10-02_17:00:00;12.7;78947 -2001-10-02_18:00:00;12.3;78947 -2001-10-02_19:00:00;12;78947 -2001-10-02_20:00:00;11.3;78947 -2001-10-02_21:00:00;10.6;78947 -2001-10-02_22:00:00;11.4;78947 -2001-10-02_23:00:00;11.1;78947 -2001-10-03_00:00:00;10.9;78947 -2001-10-03_01:00:00;11.3;78947 -2001-10-03_02:00:00;11.6;78947 -2001-10-03_03:00:00;11.8;78947 -2001-10-03_04:00:00;11.3;78947 -2001-10-03_05:00:00;11.7;78947 -2001-10-03_06:00:00;12.2;78947 -2001-10-03_07:00:00;12.8;78947 -2001-10-03_08:00:00;13.3;78947 -2001-10-03_09:00:00;13.1;78947 -2001-10-03_10:00:00;13.4;78947 -2001-10-03_11:00:00;13.7;78947 -2001-10-03_12:00:00;13.8;78947 -2001-10-03_13:00:00;13.9;78947 -2001-10-03_14:00:00;13.6;78947 -2001-10-03_15:00:00;13.9;78947 -2001-10-03_16:00:00;14.2;78947 -2001-10-03_17:00:00;14.3;78947 -2001-10-03_18:00:00;13.9;78947 -2001-10-03_19:00:00;14.1;78947 -2001-10-03_20:00:00;14.4;78947 -2001-10-03_21:00:00;12.9;78947 -2001-10-03_22:00:00;12.8;78947 -2001-10-03_23:00:00;13.1;78947 -2001-10-04_00:00:00;12.1;78947 -2001-10-04_01:00:00;12.3;78947 -2001-10-04_02:00:00;11.9;78947 -2001-10-04_03:00:00;11.5;78947 -2001-10-04_04:00:00;11.7;78947 -2001-10-04_05:00:00;12.3;78947 -2001-10-04_06:00:00;11;78947 -2001-10-04_07:00:00;11.1;78947 
-2001-10-04_09:00:00;11.5;78947 -2001-10-04_10:00:00;11.6;78947 -2001-10-04_11:00:00;12;78947 -2001-10-04_12:00:00;11.6;78947 -2001-10-04_13:00:00;12.2;78947 -2001-10-04_14:00:00;13;78947 -2001-10-04_15:00:00;12;78947 -2001-10-04_16:00:00;12.4;78947 -2001-10-04_17:00:00;11.5;78947 -2001-10-04_18:00:00;11.1;78947 -2001-10-04_19:00:00;11.2;78947 -2001-10-04_20:00:00;11.8;78947 -2001-10-04_21:00:00;11.5;78947 -2001-10-04_22:00:00;11.4;78947 -2001-10-04_23:00:00;11.2;78947 -2001-10-05_00:00:00;10.5;78947 -2001-10-05_01:00:00;10.8;78947 -2001-10-05_02:00:00;10.7;78947 -2001-10-05_03:00:00;10.9;78947 -2001-10-05_04:00:00;11.1;78947 -2001-10-05_05:00:00;10.9;78947 -2001-10-05_06:00:00;10.8;78947 -2001-10-05_07:00:00;10.9;78947 -2001-10-05_09:00:00;12.2;78947 -2001-10-05_10:00:00;12.8;78947 -2001-10-05_11:00:00;12.6;78947 -2001-10-05_12:00:00;11.9;78947 -2001-10-05_13:00:00;12.3;78947 -2001-10-05_14:00:00;13;78947 -2001-10-05_15:00:00;13.4;78947 -2001-10-05_16:00:00;13.5;78947 -2001-10-05_17:00:00;13.6;78947 -2001-10-05_18:00:00;13.6;78947 -2001-10-05_19:00:00;13.7;78947 -2001-10-05_20:00:00;13.7;78947 -2001-10-05_21:00:00;13.6;78947 -2001-10-05_22:00:00;13.4;78947 -2001-10-05_23:00:00;13.3;78947 -2001-10-06_00:00:00;12.4;78947 -2001-10-06_01:00:00;12;78947 -2001-10-06_02:00:00;11.9;78947 -2001-10-06_03:00:00;11.9;78947 -2001-10-06_04:00:00;11.2;78947 -2001-10-06_05:00:00;11;78947 -2001-10-06_06:00:00;9.5;78947 -2001-10-06_07:00:00;9.7;78947 -2001-10-06_08:00:00;10.2;78947 -2001-10-06_09:00:00;10.9;78947 -2001-10-06_10:00:00;11.1;78947 -2001-10-06_11:00:00;11.3;78947 -2001-10-06_12:00:00;10.5;78947 -2001-10-06_13:00:00;9.8;78947 -2001-10-06_14:00:00;10;78947 -2001-10-06_15:00:00;10.1;78947 -2001-10-06_16:00:00;10.1;78947 -2001-10-06_17:00:00;9.9;78947 -2001-10-06_18:00:00;9.1;78947 -2001-10-06_19:00:00;8.8;78947 -2001-10-06_20:00:00;8.5;78947 -2001-10-06_21:00:00;9;78947 -2001-10-06_22:00:00;9;78947 -2001-10-06_23:00:00;9.1;78947 -2001-10-07_00:00:00;9.2;78947 -2001-10-07_01:00:00;9.6;78947 -2001-10-07_02:00:00;9.5;78947 -2001-10-07_03:00:00;9.9;78947 -2001-10-07_04:00:00;10.6;78947 -2001-10-07_05:00:00;10.1;78947 -2001-10-07_06:00:00;9.5;78947 -2001-10-07_07:00:00;9.4;78947 -2001-10-07_09:00:00;10.7;78947 -2001-10-07_10:00:00;10.7;78947 -2001-10-07_11:00:00;9.5;78947 -2001-10-07_12:00:00;9.9;78947 -2001-10-07_13:00:00;9.8;78947 -2001-10-07_14:00:00;10.5;78947 -2001-10-07_15:00:00;9.1;78947 -2001-10-07_16:00:00;9.5;78947 -2001-10-07_17:00:00;9.5;78947 -2001-10-07_18:00:00;8.1;78947 -2001-10-07_19:00:00;9.1;78947 -2001-10-07_20:00:00;9.3;78947 -2001-10-07_21:00:00;9.3;78947 -2001-10-07_22:00:00;9.2;78947 -2001-10-07_23:00:00;9.2;78947 -2001-10-08_00:00:00;7.8;78947 -2001-10-08_01:00:00;7.7;78947 -2001-10-08_02:00:00;7.9;78947 -2001-10-08_03:00:00;7.6;78947 -2001-10-08_04:00:00;7.8;78947 -2001-10-08_05:00:00;7.9;78947 -2001-10-08_06:00:00;7.1;78947 -2001-10-08_07:00:00;6.8;78947 -2001-10-08_08:00:00;7.1;78947 -2001-10-08_09:00:00;8.2;78947 -2001-10-08_10:00:00;9.3;78947 -2001-10-08_11:00:00;10;78947 -2001-10-08_12:00:00;9.8;78947 -2001-10-08_13:00:00;10.2;78947 -2001-10-08_14:00:00;10.2;78947 -2001-10-08_15:00:00;10;78947 -2001-10-08_16:00:00;9.6;78947 -2001-10-08_17:00:00;8.5;78947 -2001-10-08_18:00:00;5.9;78947 -2001-10-08_19:00:00;4.4;78947 -2001-10-08_20:00:00;3.5;78947 -2001-10-08_21:00:00;3.6;78947 -2001-10-08_22:00:00;4.6;78947 -2001-10-08_23:00:00;6.2;78947 -2001-10-09_00:00:00;8;78947 -2001-10-09_01:00:00;8.8;78947 -2001-10-09_02:00:00;8.9;78947 -2001-10-09_03:00:00;8.8;78947 
-2001-10-09_04:00:00;8.9;78947 -2001-10-09_05:00:00;8.9;78947 -2001-10-09_06:00:00;9.6;78947 -2001-10-09_07:00:00;9.5;78947 -2001-10-09_08:00:00;9.1;78947 -2001-10-09_09:00:00;9.2;78947 -2001-10-09_10:00:00;9.4;78947 -2001-10-09_11:00:00;9.7;78947 -2001-10-09_12:00:00;9.7;78947 -2001-10-09_13:00:00;10.5;78947 -2001-10-09_14:00:00;11.2;78947 -2001-10-09_15:00:00;10.2;78947 -2001-10-09_16:00:00;9.9;78947 -2001-10-09_17:00:00;10.2;78947 -2001-10-09_18:00:00;10.8;78947 -2001-10-09_19:00:00;11.1;78947 -2001-10-09_20:00:00;10.9;78947 -2001-10-09_21:00:00;10.6;78947 -2001-10-09_22:00:00;10.4;78947 -2001-10-09_23:00:00;10.2;78947 -2001-10-10_00:00:00;9.8;78947 -2001-10-10_01:00:00;9.9;78947 -2001-10-10_02:00:00;9.9;78947 -2001-10-10_03:00:00;9.9;78947 -2001-10-10_04:00:00;10;78947 -2001-10-10_05:00:00;10.2;78947 -2001-10-10_06:00:00;10.2;78947 -2001-10-10_07:00:00;10.3;78947 -2001-10-10_08:00:00;10;78947 -2001-10-10_09:00:00;9.7;78947 -2001-10-10_10:00:00;10.7;78947 -2001-10-10_11:00:00;10.2;78947 -2001-10-10_12:00:00;10.5;78947 -2001-10-10_13:00:00;10.4;78947 -2001-10-10_14:00:00;10.9;78947 -2001-10-10_15:00:00;11;78947 -2001-10-10_16:00:00;10.4;78947 -2001-10-10_17:00:00;10.1;78947 -2001-10-10_18:00:00;9.5;78947 -2001-10-10_19:00:00;9.5;78947 -2001-10-10_20:00:00;9.7;78947 -2001-10-10_21:00:00;9.7;78947 -2001-10-10_22:00:00;9.8;78947 -2001-10-10_23:00:00;9.8;78947 -2001-10-11_00:00:00;9.6;78947 -2001-10-11_01:00:00;9.3;78947 -2001-10-11_02:00:00;9.7;78947 -2001-10-11_03:00:00;10;78947 -2001-10-11_04:00:00;9.8;78947 -2001-10-11_05:00:00;9.6;78947 -2001-10-11_06:00:00;9.2;78947 -2001-10-11_07:00:00;9.2;78947 -2001-10-11_08:00:00;9.4;78947 -2001-10-11_09:00:00;10.3;78947 -2001-10-11_10:00:00;9.6;78947 -2001-10-11_11:00:00;10.3;78947 -2001-10-11_12:00:00;10.9;78947 -2001-10-11_13:00:00;11;78947 -2001-10-11_14:00:00;10.7;78947 -2001-10-11_15:00:00;8.7;78947 -2001-10-11_16:00:00;8.7;78947 -2001-10-11_17:00:00;8.8;78947 -2001-10-11_18:00:00;8.2;78947 -2001-10-11_19:00:00;8.6;78947 -2001-10-11_20:00:00;8.7;78947 -2001-10-11_21:00:00;9.2;78947 -2001-10-11_22:00:00;9.3;78947 -2001-10-11_23:00:00;9.1;78947 -2001-10-12_00:00:00;9.3;78947 -2001-10-12_01:00:00;9.4;78947 -2001-10-12_02:00:00;9.6;78947 -2001-10-12_03:00:00;9.7;78947 -2001-10-12_04:00:00;9.6;78947 -2001-10-12_05:00:00;9.3;78947 -2001-10-12_06:00:00;9.1;78947 -2001-10-12_07:00:00;9.2;78947 -2001-10-12_08:00:00;9.3;78947 -2001-10-12_11:00:00;9.5;78947 -2001-10-12_12:00:00;10.6;78947 -2001-10-12_13:00:00;10.5;78947 -2001-10-12_14:00:00;11;78947 -2001-10-12_15:00:00;10.6;78947 -2001-10-12_16:00:00;10.1;78947 -2001-10-12_17:00:00;9.8;78947 -2001-10-12_18:00:00;8.7;78947 -2001-10-12_19:00:00;8.4;78947 -2001-10-12_20:00:00;8;78947 -2001-10-12_21:00:00;7.8;78947 -2001-10-12_22:00:00;7.3;78947 -2001-10-12_23:00:00;6.8;78947 -2001-10-13_00:00:00;6.4;78947 -2001-10-13_01:00:00;6.1;78947 -2001-10-13_02:00:00;5.5;78947 -2001-10-13_03:00:00;4.6;78947 -2001-10-13_04:00:00;3.5;78947 -2001-10-13_05:00:00;2.7;78947 -2001-10-13_06:00:00;2.2;78947 -2001-10-13_07:00:00;2;78947 -2001-10-13_09:00:00;6.4;78947 -2001-10-13_10:00:00;9.4;78947 -2001-10-13_11:00:00;10.5;78947 -2001-10-13_12:00:00;10.3;78947 -2001-10-13_13:00:00;10.4;78947 -2001-10-13_14:00:00;10.3;78947 -2001-10-13_15:00:00;9.9;78947 -2001-10-13_16:00:00;8.7;78947 -2001-10-13_17:00:00;6.7;78947 -2001-10-13_18:00:00;5.2;78947 -2001-10-13_19:00:00;4.6;78947 -2001-10-13_20:00:00;4.7;78947 -2001-10-13_21:00:00;4.7;78947 -2001-10-13_22:00:00;4.7;78947 -2001-10-13_23:00:00;5.4;78947 
-2001-10-14_00:00:00;6.1;78947 -2001-10-14_01:00:00;6.7;78947 -2001-10-14_02:00:00;7.6;78947 -2001-10-14_03:00:00;8.1;78947 -2001-10-14_04:00:00;8.3;78947 -2001-10-14_06:00:00;9.5;78947 -2001-10-14_07:00:00;9.6;78947 -2001-10-14_09:00:00;9.9;78947 -2001-10-14_10:00:00;9.9;78947 -2001-10-14_11:00:00;10.3;78947 -2001-10-14_12:00:00;11.2;78947 -2001-10-14_13:00:00;11.1;78947 -2001-10-14_14:00:00;11.2;78947 -2001-10-14_15:00:00;11.3;78947 -2001-10-14_16:00:00;11.1;78947 -2001-10-14_17:00:00;10.4;78947 -2001-10-14_18:00:00;10.2;78947 -2001-10-14_19:00:00;10.3;78947 -2001-10-14_20:00:00;9.8;78947 -2001-10-14_21:00:00;9.6;78947 -2001-10-14_22:00:00;9.6;78947 -2001-10-14_23:00:00;9.8;78947 -2001-10-15_00:00:00;9.9;78947 -2001-10-15_01:00:00;10.3;78947 -2001-10-15_02:00:00;10.4;78947 -2001-10-15_03:00:00;10.5;78947 -2001-10-15_04:00:00;10.6;78947 -2001-10-15_05:00:00;10.8;78947 -2001-10-15_06:00:00;9.7;78947 -2001-10-15_07:00:00;9.9;78947 -2001-10-15_08:00:00;10.4;78947 -2001-10-15_09:00:00;10.6;78947 -2001-10-15_10:00:00;11;78947 -2001-10-15_11:00:00;11.1;78947 -2001-10-15_12:00:00;12.1;78947 -2001-10-15_13:00:00;11.1;78947 -2001-10-15_14:00:00;11.3;78947 -2001-10-15_15:00:00;11;78947 -2001-10-15_16:00:00;10.5;78947 -2001-10-15_17:00:00;10.3;78947 -2001-10-15_18:00:00;10.4;78947 -2001-10-15_19:00:00;10.1;78947 -2001-10-15_20:00:00;10.2;78947 -2001-10-15_21:00:00;10.1;78947 -2001-10-15_22:00:00;9.9;78947 -2001-10-15_23:00:00;10.1;78947 -2001-10-16_00:00:00;9.8;78947 -2001-10-16_01:00:00;9.8;78947 -2001-10-16_02:00:00;9.9;78947 -2001-10-16_03:00:00;9.9;78947 -2001-10-16_04:00:00;10.1;78947 -2001-10-16_05:00:00;10.3;78947 -2001-10-16_06:00:00;10.4;78947 -2001-10-16_07:00:00;10.4;78947 -2001-10-16_08:00:00;10.6;78947 -2001-10-16_09:00:00;11;78947 -2001-10-16_10:00:00;11.5;78947 -2001-10-16_11:00:00;11.7;78947 -2001-10-16_12:00:00;11.6;78947 -2001-10-16_13:00:00;11.5;78947 -2001-10-16_14:00:00;11.3;78947 -2001-10-16_15:00:00;11.1;78947 -2001-10-16_16:00:00;11.1;78947 -2001-10-16_17:00:00;11.1;78947 -2001-10-16_18:00:00;10.8;78947 -2001-10-16_19:00:00;11;78947 -2001-10-16_20:00:00;11.2;78947 -2001-10-16_21:00:00;11.3;78947 -2001-10-16_22:00:00;11.4;78947 -2001-10-16_23:00:00;11.4;78947 -2001-10-17_00:00:00;10.8;78947 -2001-10-17_01:00:00;10.5;78947 -2001-10-17_02:00:00;10.9;78947 -2001-10-17_03:00:00;11.1;78947 -2001-10-17_04:00:00;11.1;78947 -2001-10-17_05:00:00;10.7;78947 -2001-10-17_06:00:00;10.7;78947 -2001-10-17_07:00:00;10.5;78947 -2001-10-17_09:00:00;11.8;78947 -2001-10-17_10:00:00;11.5;78947 -2001-10-17_11:00:00;11.5;78947 -2001-10-17_12:00:00;11.4;78947 -2001-10-17_13:00:00;11.3;78947 -2001-10-17_14:00:00;11.1;78947 -2001-10-17_15:00:00;11;78947 -2001-10-17_16:00:00;10.8;78947 -2001-10-17_17:00:00;10.8;78947 -2001-10-17_18:00:00;10.5;78947 -2001-10-17_19:00:00;10.6;78947 -2001-10-17_20:00:00;10.7;78947 -2001-10-17_21:00:00;10.5;78947 -2001-10-17_22:00:00;10.6;78947 -2001-10-17_23:00:00;11.1;78947 -2001-10-18_00:00:00;8.5;78947 -2001-10-18_01:00:00;6.8;78947 -2001-10-18_02:00:00;7.5;78947 -2001-10-18_03:00:00;8.7;78947 -2001-10-18_04:00:00;8.6;78947 -2001-10-18_05:00:00;8.7;78947 -2001-10-18_06:00:00;8.5;78947 -2001-10-18_07:00:00;8.5;78947 -2001-10-18_09:00:00;9.5;78947 -2001-10-18_10:00:00;9.5;78947 -2001-10-18_11:00:00;9.9;78947 -2001-10-18_12:00:00;9.7;78947 -2001-10-18_13:00:00;9.1;78947 -2001-10-18_14:00:00;9.3;78947 -2001-10-18_15:00:00;9;78947 -2001-10-18_16:00:00;8.5;78947 -2001-10-18_17:00:00;8.3;78947 -2001-10-18_18:00:00;8.2;78947 -2001-10-18_19:00:00;8.3;78947 
-2001-10-18_20:00:00;8.1;78947 -2001-10-18_21:00:00;7.6;78947 -2001-10-18_22:00:00;6.2;78947 -2001-10-18_23:00:00;6.7;78947 -2001-10-19_00:00:00;7.4;78947 -2001-10-19_01:00:00;7.1;78947 -2001-10-19_02:00:00;7.8;78947 -2001-10-19_03:00:00;8;78947 -2001-10-19_04:00:00;7.6;78947 -2001-10-19_05:00:00;6.3;78947 -2001-10-19_06:00:00;7.2;78947 -2001-10-19_07:00:00;8;78947 -2001-10-19_09:00:00;7.6;78947 -2001-10-19_10:00:00;7.6;78947 -2001-10-19_11:00:00;7.6;78947 -2001-10-19_12:00:00;6.9;78947 -2001-10-19_13:00:00;6.8;78947 -2001-10-19_14:00:00;6.6;78947 -2001-10-19_15:00:00;6.4;78947 -2001-10-19_16:00:00;6.3;78947 -2001-10-19_17:00:00;6.2;78947 -2001-10-19_18:00:00;6.9;78947 -2001-10-19_19:00:00;6.6;78947 -2001-10-19_20:00:00;6.4;78947 -2001-10-19_21:00:00;6;78947 -2001-10-19_22:00:00;5.9;78947 -2001-10-19_23:00:00;5.7;78947 -2001-10-20_00:00:00;6.1;78947 -2001-10-20_01:00:00;6.2;78947 -2001-10-20_02:00:00;7.1;78947 -2001-10-20_03:00:00;6.4;78947 -2001-10-20_04:00:00;6.3;78947 -2001-10-20_05:00:00;6.6;78947 -2001-10-20_06:00:00;7;78947 -2001-10-20_07:00:00;6.1;78947 -2001-10-20_09:00:00;5.6;78947 -2001-10-20_10:00:00;6.1;78947 -2001-10-20_11:00:00;6.6;78947 -2001-10-20_12:00:00;6.8;78947 -2001-10-20_13:00:00;6.4;78947 -2001-10-20_14:00:00;6.7;78947 -2001-10-20_15:00:00;6.2;78947 -2001-10-20_16:00:00;5.7;78947 -2001-10-20_17:00:00;5.2;78947 -2001-10-20_18:00:00;4;78947 -2001-10-20_19:00:00;3.1;78947 -2001-10-20_20:00:00;3.1;78947 -2001-10-20_21:00:00;3.1;78947 -2001-10-20_22:00:00;2.1;78947 -2001-10-20_23:00:00;2;78947 -2001-10-21_00:00:00;2.2;78947 -2001-10-21_01:00:00;3.7;78947 -2001-10-21_02:00:00;5.1;78947 -2001-10-21_03:00:00;6;78947 -2001-10-21_04:00:00;6.4;78947 -2001-10-21_05:00:00;6.7;78947 -2001-10-21_06:00:00;6.9;78947 -2001-10-21_07:00:00;7.3;78947 From 6e01c2842809d3609b48a2c2ca72f1351124c924 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Thu, 5 Dec 2024 11:04:04 +0100 Subject: [PATCH 58/67] Unclutter kdvh package --- .../kdvh/{import => db}/convert_functions.go | 193 +++++++----------- .../kdvh/{dump => db}/dump_functions.go | 186 +++++++++++------ migrations/kdvh/db/flag_test.go | 33 +++ migrations/kdvh/{import => db}/flags.go | 2 +- migrations/kdvh/db/main.go | 53 +++-- migrations/kdvh/db/table.go | 42 +++- migrations/kdvh/dump/dump.go | 18 +- migrations/kdvh/dump/write.go | 89 -------- migrations/kdvh/import/cache/main.go | 16 +- migrations/kdvh/import/import.go | 9 +- migrations/kdvh/import/import_test.go | 31 --- 11 files changed, 325 insertions(+), 347 deletions(-) rename migrations/kdvh/{import => db}/convert_functions.go (56%) rename migrations/kdvh/{dump => db}/dump_functions.go (50%) create mode 100644 migrations/kdvh/db/flag_test.go rename migrations/kdvh/{import => db}/flags.go (99%) delete mode 100644 migrations/kdvh/dump/write.go delete mode 100644 migrations/kdvh/import/import_test.go diff --git a/migrations/kdvh/import/convert_functions.go b/migrations/kdvh/db/convert_functions.go similarity index 56% rename from migrations/kdvh/import/convert_functions.go rename to migrations/kdvh/db/convert_functions.go index 6340e0a5..448d809b 100644 --- a/migrations/kdvh/import/convert_functions.go +++ b/migrations/kdvh/db/convert_functions.go @@ -1,110 +1,75 @@ -package port +package db import ( "errors" "strconv" - "time" "github.com/rickb777/period" - "migrate/kdvh/db" - "migrate/kdvh/import/cache" "migrate/lard" ) -// The following ConvertFunctions try to recover the original pair of `controlinfo` -// and `useinfo` generated by Kvalobs for an observation, based on 
`Obs.Flags` and `Obs.Data` -// Different KDVH tables need different ways to perform this conversion (defined in CONV_MAP). -// -// It returns three structs for each of the lard tables we are inserting into -type ConvertFunction func(KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) - -func getConvertFunc(table *db.Table) ConvertFunction { - switch table.TableName { - case "T_EDATA": - return ConvertEdata - case "T_PDATA": - return ConvertPdata - case "T_NDATA": - return ConvertNdata - case "T_VDATA": - return ConvertVdata - case "T_MONTH", "T_DIURNAL", "T_HOMOGEN_DIURNAL", "T_HOMOGEN_MONTH": - return ConvertProduct - case "T_DIURNAL_INTERPOLATED": - return ConvertDiurnalInterpolated - } - return Convert -} - -type KdvhObs struct { - *cache.TsInfo - obstime time.Time - data string - flags string -} - // Work around to return reference to consts func addr[T any](t T) *T { return &t } -func (obs *KdvhObs) flagsAreValid() bool { - if len(obs.flags) != 5 { +func flagsAreValid(obs *KdvhObs) bool { + if len(obs.Flags) != 5 { return false } - _, err := strconv.ParseInt(obs.flags, 10, 32) + _, err := strconv.ParseInt(obs.Flags, 10, 32) return err == nil } -func (obs *KdvhObs) Useinfo() *string { - if !obs.flagsAreValid() { +func useinfo(obs *KdvhObs) *string { + if !flagsAreValid(obs) { return addr(INVALID_FLAGS) } - return addr(obs.flags + DELAY_DEFAULT) + return addr(obs.Flags + DELAY_DEFAULT) } // Default ConvertFunction // NOTE: this should be the only function that can return `lard.TextObs` with non-null text data. -func Convert(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { +func convert(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { var valPtr *float32 controlinfo := VALUE_PASSED_QC - if obs.data == "" { + if obs.Data == "" { controlinfo = VALUE_MISSING } - val, err := strconv.ParseFloat(obs.data, 32) + val, err := strconv.ParseFloat(obs.Data, 32) if err == nil { valPtr = addr(float32(val)) } return lard.DataObs{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Data: valPtr, }, lard.TextObs{ - Id: obs.Id, - Obstime: obs.obstime, - Text: &obs.data, + Id: ts.Id, + Obstime: obs.Obstime, + Text: &obs.Data, }, lard.Flag{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Original: valPtr, Corrected: valPtr, Controlinfo: &controlinfo, - Useinfo: obs.Useinfo(), + Useinfo: useinfo(obs), }, nil } // This function modifies obstimes to always use totime // This is needed because KDVH used incorrect and incosistent timestamps -func ConvertProduct(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { - data, text, flag, err := Convert(obs) - if !obs.Offset.IsZero() { - if temp, ok := obs.Offset.AddTo(data.Obstime); ok { +func convertProduct(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { + data, text, flag, err := convert(obs, ts) + if !ts.Offset.IsZero() { + if temp, ok := ts.Offset.AddTo(data.Obstime); ok { data.Obstime = temp text.Obstime = temp flag.Obstime = temp @@ -113,12 +78,12 @@ func ConvertProduct(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) return data, text, flag, err } -func ConvertEdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { +func convertEdata(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { var controlinfo string var valPtr *float32 - if val, err := strconv.ParseFloat(obs.data, 32); err != nil { - switch obs.flags { + if val, err := strconv.ParseFloat(obs.Data, 32); err != nil { + switch 
obs.Flags { case "70381", "70389", "90989": controlinfo = VALUE_REMOVED_BY_QC default: @@ -131,31 +96,31 @@ func ConvertEdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { } return lard.DataObs{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Data: valPtr, }, lard.TextObs{ - Id: obs.Id, - Obstime: obs.obstime, - Text: &obs.data, + Id: ts.Id, + Obstime: obs.Obstime, + Text: &obs.Data, }, lard.Flag{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Original: valPtr, Corrected: valPtr, Controlinfo: &controlinfo, - Useinfo: obs.Useinfo(), + Useinfo: useinfo(obs), }, nil } -func ConvertPdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { +func convertPdata(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { var controlinfo string var valPtr *float32 - if val, err := strconv.ParseFloat(obs.data, 32); err != nil { - switch obs.flags { + if val, err := strconv.ParseFloat(obs.Data, 32); err != nil { + switch obs.Flags { case "20389", "30389", "40389", "50383", "70381", "71381": controlinfo = VALUE_REMOVED_BY_QC default: @@ -168,7 +133,7 @@ func ConvertPdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { } else { valPtr = addr(float32(val)) - switch obs.flags { + switch obs.Flags { case "10319", "10329", "30319", "40319", "48929", "48999": controlinfo = VALUE_MANUALLY_INTERPOLATED case "20389", "30389", "40389", "50383", "70381", "71381", "99319": @@ -182,31 +147,31 @@ func ConvertPdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { } return lard.DataObs{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Data: valPtr, }, lard.TextObs{ - Id: obs.Id, - Obstime: obs.obstime, - Text: &obs.data, + Id: ts.Id, + Obstime: obs.Obstime, + Text: &obs.Data, }, lard.Flag{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Original: valPtr, Corrected: valPtr, Controlinfo: &controlinfo, - Useinfo: obs.Useinfo(), + Useinfo: useinfo(obs), }, nil } -func ConvertNdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { +func convertNdata(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { var controlinfo string var valPtr *float32 - if val, err := strconv.ParseFloat(obs.data, 32); err != nil { - switch obs.flags { + if val, err := strconv.ParseFloat(obs.Data, 32); err != nil { + switch obs.Flags { case "70389": controlinfo = VALUE_REMOVED_BY_QC default: @@ -219,7 +184,7 @@ func ConvertNdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { } else { valPtr = addr(float32(val)) - switch obs.flags { + switch obs.Flags { case "43325", "48325": controlinfo = VALUE_MANUALLY_ASSIGNED case "30319", "38929", "40315", "40319": @@ -235,53 +200,53 @@ func ConvertNdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { } return lard.DataObs{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Data: valPtr, }, lard.TextObs{ - Id: obs.Id, - Obstime: obs.obstime, - Text: &obs.data, + Id: ts.Id, + Obstime: obs.Obstime, + Text: &obs.Data, }, lard.Flag{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Original: valPtr, Corrected: valPtr, Controlinfo: &controlinfo, - Useinfo: obs.Useinfo(), + Useinfo: useinfo(obs), }, nil } -func ConvertVdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { +func convertVdata(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { var useinfo, controlinfo string var valPtr *float32 // set 
useinfo based on time - if h := obs.obstime.Hour(); h == 0 || h == 6 || h == 12 || h == 18 { + if h := obs.Obstime.Hour(); h == 0 || h == 6 || h == 12 || h == 18 { useinfo = COMPLETED_HQC } else { useinfo = INVALID_FLAGS } // set data and controlinfo - if val, err := strconv.ParseFloat(obs.data, 32); err != nil { + if val, err := strconv.ParseFloat(obs.Data, 32); err != nil { controlinfo = VALUE_MISSING } else { // super special treatment clause of T_VDATA.OT_24, so it will be the same as in kvalobs // add custom offset, because OT_24 in KDVH has been treated differently than OT_24 in kvalobs - if obs.Element == "OT_24" { + if ts.Element == "OT_24" { offset, err := period.Parse("PT18H") // fromtime_offset -PT6H, timespan P1D if err != nil { return lard.DataObs{}, lard.TextObs{}, lard.Flag{}, errors.New("could not parse period") } - temp, ok := offset.AddTo(obs.obstime) + temp, ok := offset.AddTo(obs.Obstime) if !ok { return lard.DataObs{}, lard.TextObs{}, lard.Flag{}, errors.New("could not add period") } - obs.obstime = temp + obs.Obstime = temp // convert from hours to minutes val *= 60.0 } @@ -291,18 +256,18 @@ func ConvertVdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { } return lard.DataObs{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Data: valPtr, }, lard.TextObs{ - Id: obs.Id, - Obstime: obs.obstime, - Text: &obs.data, + Id: ts.Id, + Obstime: obs.Obstime, + Text: &obs.Data, }, lard.Flag{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Original: valPtr, Corrected: valPtr, Useinfo: &useinfo, @@ -310,25 +275,25 @@ func ConvertVdata(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { }, nil } -func ConvertDiurnalInterpolated(obs KdvhObs) (lard.DataObs, lard.TextObs, lard.Flag, error) { - val, err := strconv.ParseFloat(obs.data, 32) +func convertDiurnalInterpolated(obs *KdvhObs, ts *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) { + val, err := strconv.ParseFloat(obs.Data, 32) if err != nil { return lard.DataObs{}, lard.TextObs{}, lard.Flag{}, err } valPtr := addr(float32(val)) return lard.DataObs{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Data: valPtr, }, lard.TextObs{ - Id: obs.Id, - Obstime: obs.obstime, - Text: &obs.data, + Id: ts.Id, + Obstime: obs.Obstime, + Text: &obs.Data, }, lard.Flag{ - Id: obs.Id, - Obstime: obs.obstime, + Id: ts.Id, + Obstime: obs.Obstime, Original: valPtr, Corrected: valPtr, Useinfo: addr(DIURNAL_INTERPOLATED_USEINFO), diff --git a/migrations/kdvh/dump/dump_functions.go b/migrations/kdvh/db/dump_functions.go similarity index 50% rename from migrations/kdvh/dump/dump_functions.go rename to migrations/kdvh/db/dump_functions.go index 1ad0989b..345d027e 100644 --- a/migrations/kdvh/dump/dump_functions.go +++ b/migrations/kdvh/db/dump_functions.go @@ -1,44 +1,38 @@ -package dump +package db import ( "context" + "database/sql" + "encoding/csv" "errors" "fmt" + "io" "log/slog" "os" "path/filepath" + "slices" "strconv" + "time" + "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" - - "migrate/kdvh/db" ) -// Function used to dump the KDVH table, see below -type DumpFunction func(path string, meta DumpArgs, pool *pgxpool.Pool) error -type DumpArgs struct { - element string - station string - dataTable string - flagTable string - overwrite bool - logStr string -} +// Format string for date field in CSV files +const TIMEFORMAT string = "2006-01-02_15:04:05" -func getDumpFunc(table *db.Table) DumpFunction { - switch table.TableName { - 
case "T_METARDATA", "T_HOMOGEN_DIURNAL": - return dumpDataOnly - case "T_SECOND_DATA", "T_MINUTE_DATA", "T_10MINUTE_DATA": - return dumpByYear - case "T_HOMOGEN_MONTH": - return dumpHomogenMonth - } - return dumpDataAndFlags +// Error returned if no observations are found for a (station, element) pair +var EMPTY_QUERY_ERR error = errors.New("The query did not return any rows") + +// Struct representing a single record in the output CSV file +type Record struct { + Time time.Time `db:"time"` + Data sql.NullString `db:"data"` + Flag sql.NullString `db:"flag"` } -func fileExists(filename string, overwrite bool) error { - if _, err := os.Stat(filename); err == nil && !overwrite { +func fileExists(filename string) error { + if _, err := os.Stat(filename); err == nil { return errors.New( fmt.Sprintf( "Skipping dump of %q because dumped file already exists and the --overwrite flag was not provided", @@ -72,13 +66,13 @@ func fetchYearRange(tableName, station string, pool *pgxpool.Pool) (int64, int64 // This function is used when the table contains large amount of data // (T_SECOND, T_MINUTE, T_10MINUTE) -func dumpByYear(path string, meta DumpArgs, pool *pgxpool.Pool) error { - dataBegin, dataEnd, err := fetchYearRange(meta.dataTable, meta.station, pool) +func dumpByYear(path string, args dumpArgs, logStr string, overwrite bool, pool *pgxpool.Pool) error { + dataBegin, dataEnd, err := fetchYearRange(args.dataTable, args.station, pool) if err != nil { return err } - flagBegin, flagEnd, err := fetchYearRange(meta.flagTable, meta.station, pool) + flagBegin, flagEnd, err := fetchYearRange(args.flagTable, args.station, pool) if err != nil { return err } @@ -98,32 +92,32 @@ func dumpByYear(path string, meta DumpArgs, pool *pgxpool.Pool) error { (SELECT dato, stnr, %[1]s FROM %[3]s WHERE %[1]s IS NOT NULL AND stnr = $1 AND TO_CHAR(dato, 'yyyy') = $2) f USING (dato)`, - meta.element, - meta.dataTable, - meta.flagTable, + args.element, + args.dataTable, + args.flagTable, ) for year := begin; year < end; year++ { yearPath := filepath.Join(path, fmt.Sprint(year)) if err := os.MkdirAll(path, os.ModePerm); err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) continue } - filename := filepath.Join(yearPath, meta.element+".csv") - if err := fileExists(filename, meta.overwrite); err != nil { - slog.Warn(meta.logStr + err.Error()) + filename := filepath.Join(yearPath, args.element+".csv") + if err := fileExists(filename); err != nil && !overwrite { + slog.Warn(logStr + err.Error()) continue } - rows, err := pool.Query(context.TODO(), query, meta.station, year) + rows, err := pool.Query(context.TODO(), query, args.station, year) if err != nil { - slog.Error(meta.logStr + fmt.Sprint("Could not query KDVH: ", err)) + slog.Error(logStr + "Could not query KDVH - " + err.Error()) continue } if err := writeToCsv(filename, rows); err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) continue } } @@ -137,10 +131,10 @@ func dumpByYear(path string, meta DumpArgs, pool *pgxpool.Pool) error { // - RR (hourly precipitations, note that in Stinfosys this parameter is 'RR_1') // // We calculate the other data on the fly (outside this program) if needed. 
-func dumpHomogenMonth(path string, meta DumpArgs, pool *pgxpool.Pool) error { - filename := filepath.Join(path, meta.element+".csv") - if err := fileExists(filename, meta.overwrite); err != nil { - slog.Warn(meta.logStr + err.Error()) +func dumpHomogenMonth(path string, args dumpArgs, logStr string, overwrite bool, pool *pgxpool.Pool) error { + filename := filepath.Join(path, args.element+".csv") + if err := fileExists(filename); err != nil && !overwrite { + slog.Warn(logStr + err.Error()) return err } @@ -148,17 +142,17 @@ func dumpHomogenMonth(path string, meta DumpArgs, pool *pgxpool.Pool) error { `SELECT dato AS time, %s[1]s AS data, '' AS flag FROM T_HOMOGEN_MONTH WHERE %s[1]s IS NOT NULL AND stnr = $1 AND season BETWEEN 1 AND 12`, // NOTE: adding a dummy argument is the only way to suppress this stupid warning - meta.element, "", + args.element, "", ) - rows, err := pool.Query(context.TODO(), query, meta.station) + rows, err := pool.Query(context.TODO(), query, args.station) if err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) return err } if err := writeToCsv(filename, rows); err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) return err } @@ -167,28 +161,28 @@ func dumpHomogenMonth(path string, meta DumpArgs, pool *pgxpool.Pool) error { // This function is used to dump tables that don't have a FLAG table, // (T_METARDATA, T_HOMOGEN_DIURNAL) -func dumpDataOnly(path string, meta DumpArgs, pool *pgxpool.Pool) error { - filename := filepath.Join(path, meta.element+".csv") - if err := fileExists(filename, meta.overwrite); err != nil { - slog.Warn(meta.logStr + err.Error()) +func dumpDataOnly(path string, args dumpArgs, logStr string, overwrite bool, pool *pgxpool.Pool) error { + filename := filepath.Join(path, args.element+".csv") + if err := fileExists(filename); err != nil && !overwrite { + slog.Warn(logStr + err.Error()) return err } query := fmt.Sprintf( `SELECT dato AS time, %[1]s AS data, '' AS flag FROM %[2]s WHERE %[1]s IS NOT NULL AND stnr = $1`, - meta.element, - meta.dataTable, + args.element, + args.dataTable, ) - rows, err := pool.Query(context.TODO(), query, meta.station) + rows, err := pool.Query(context.TODO(), query, args.station) if err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) return err } if err := writeToCsv(filename, rows); err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) return err } @@ -198,10 +192,10 @@ func dumpDataOnly(path string, meta DumpArgs, pool *pgxpool.Pool) error { // This is the default dump function. 
// It selects both data and flag tables for a specific (station, element) pair, // and then performs a full outer join on the two subqueries -func dumpDataAndFlags(path string, meta DumpArgs, pool *pgxpool.Pool) error { - filename := filepath.Join(path, meta.element+".csv") - if err := fileExists(filename, meta.overwrite); err != nil { - slog.Warn(meta.logStr + err.Error()) +func dumpDataAndFlags(path string, args dumpArgs, logStr string, overwrite bool, pool *pgxpool.Pool) error { + filename := filepath.Join(path, args.element+".csv") + if err := fileExists(filename); err != nil && !overwrite { + slog.Warn(logStr + err.Error()) return err } @@ -215,23 +209,85 @@ func dumpDataAndFlags(path string, meta DumpArgs, pool *pgxpool.Pool) error { FULL OUTER JOIN (SELECT dato, %[1]s FROM %[3]s WHERE %[1]s IS NOT NULL AND stnr = $1) f USING (dato)`, - meta.element, - meta.dataTable, - meta.flagTable, + args.element, + args.dataTable, + args.flagTable, ) - rows, err := pool.Query(context.TODO(), query, meta.station) + rows, err := pool.Query(context.TODO(), query, args.station) if err != nil { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) return err } if err := writeToCsv(filename, rows); err != nil { if !errors.Is(err, EMPTY_QUERY_ERR) { - slog.Error(meta.logStr + err.Error()) + slog.Error(logStr + err.Error()) } return err } return nil } + +// Dumps queried rows to file +func writeToCsv(filename string, rows pgx.Rows) error { + lines, err := sortRows(rows) + if err != nil { + return err + } + + // Return if query was empty + if len(lines) == 0 { + return EMPTY_QUERY_ERR + } + + file, err := os.Create(filename) + if err != nil { + return err + } + + err = writeElementFile(lines, file) + if closeErr := file.Close(); closeErr != nil { + return errors.Join(err, closeErr) + } + return err +} + +// Scans the rows and collects them in a slice of chronologically sorted lines +func sortRows(rows pgx.Rows) ([]Record, error) { + defer rows.Close() + + records, err := pgx.CollectRows(rows, pgx.RowToStructByName[Record]) + if err != nil { + return nil, errors.New("Could not collect rows: " + err.Error()) + } + + slices.SortFunc(records, func(a, b Record) int { + return a.Time.Compare(b.Time) + }) + + return records, rows.Err() +} + +// Writes queried (time | data | flag) columns to CSV +func writeElementFile(lines []Record, file io.Writer) error { + // Write number of lines as header + file.Write([]byte(fmt.Sprintf("%v\n", len(lines)))) + + writer := csv.NewWriter(file) + + record := make([]string, 3) + for _, l := range lines { + record[0] = l.Time.Format(TIMEFORMAT) + record[1] = l.Data.String + record[2] = l.Flag.String + + if err := writer.Write(record); err != nil { + return errors.New("Could not write to file: " + err.Error()) + } + } + + writer.Flush() + return writer.Error() +} diff --git a/migrations/kdvh/db/flag_test.go b/migrations/kdvh/db/flag_test.go new file mode 100644 index 00000000..212ab61a --- /dev/null +++ b/migrations/kdvh/db/flag_test.go @@ -0,0 +1,33 @@ +package db + +import ( + "testing" +) + +func TestFlagsAreValid(t *testing.T) { + type testCase struct { + input KdvhObs + expected bool + } + + cases := []testCase{ + {KdvhObs{Flags: "12309"}, true}, + {KdvhObs{Flags: "984.3"}, false}, + {KdvhObs{Flags: ".1111"}, false}, + {KdvhObs{Flags: "1234."}, false}, + {KdvhObs{Flags: "12.2.4"}, false}, + {KdvhObs{Flags: "12.343"}, false}, + {KdvhObs{Flags: ""}, false}, + {KdvhObs{Flags: "asdas"}, false}, + {KdvhObs{Flags: "12a3a"}, false}, + {KdvhObs{Flags: "1sdfl"}, 
false}, + } + + for _, c := range cases { + t.Log("Testing flag:", c.input.Flags) + + if result := flagsAreValid(&c.input); result != c.expected { + t.Errorf("Got %v, wanted %v", result, c.expected) + } + } +} diff --git a/migrations/kdvh/import/flags.go b/migrations/kdvh/db/flags.go similarity index 99% rename from migrations/kdvh/import/flags.go rename to migrations/kdvh/db/flags.go index 8fdc511b..89fd412f 100644 --- a/migrations/kdvh/import/flags.go +++ b/migrations/kdvh/db/flags.go @@ -1,4 +1,4 @@ -package port +package db // In kvalobs a flag is a 16 char string containg QC information about the observation: // Note: Missing numbers in the following lists are marked as reserved (not in use I guess?) diff --git a/migrations/kdvh/db/main.go b/migrations/kdvh/db/main.go index 698d4068..03fb3ac6 100644 --- a/migrations/kdvh/db/main.go +++ b/migrations/kdvh/db/main.go @@ -1,5 +1,13 @@ package db +import ( + "migrate/stinfosys" + "migrate/utils" + "time" + + "github.com/rickb777/period" +) + const KDVH_ENV_VAR string = "KDVH_PROXY_CONN_STRING" // Map of all tables found in KDVH, with set max import year @@ -11,39 +19,58 @@ func Init() *KDVH { return &KDVH{map[string]*Table{ // Section 1: tables that need to be migrated entirely // TODO: figure out if we need to use the elem_code_paramid_level_sensor_t_edata table? - "T_EDATA": NewTable("T_EDATA", "T_EFLAG", "T_ELEM_EDATA").SetImportYear(3000), - "T_METARDATA": NewTable("T_METARDATA", "", "T_ELEM_METARDATA").SetImportYear(3000), + "T_EDATA": NewTable("T_EDATA", "T_EFLAG", "T_ELEM_EDATA").SetConvertFunc(convertEdata).SetImportYear(3000), + "T_METARDATA": NewTable("T_METARDATA", "", "T_ELEM_METARDATA").SetDumpFunc(dumpDataOnly).SetImportYear(3000), // Section 2: tables with some data in kvalobs, import only up to 2005-12-31 "T_ADATA": NewTable("T_ADATA", "T_AFLAG", "T_ELEM_OBS").SetImportYear(2006), "T_MDATA": NewTable("T_MDATA", "T_MFLAG", "T_ELEM_OBS").SetImportYear(2006), "T_TJ_DATA": NewTable("T_TJ_DATA", "T_TJ_FLAG", "T_ELEM_OBS").SetImportYear(2006), - "T_PDATA": NewTable("T_PDATA", "T_PFLAG", "T_ELEM_OBS").SetImportYear(2006), - "T_NDATA": NewTable("T_NDATA", "T_NFLAG", "T_ELEM_OBS").SetImportYear(2006), - "T_VDATA": NewTable("T_VDATA", "T_VFLAG", "T_ELEM_OBS").SetImportYear(2006), + "T_PDATA": NewTable("T_PDATA", "T_PFLAG", "T_ELEM_OBS").SetConvertFunc(convertPdata).SetImportYear(2006), + "T_NDATA": NewTable("T_NDATA", "T_NFLAG", "T_ELEM_OBS").SetConvertFunc(convertNdata).SetImportYear(2006), + "T_VDATA": NewTable("T_VDATA", "T_VFLAG", "T_ELEM_OBS").SetConvertFunc(convertVdata).SetImportYear(2006), "T_UTLANDDATA": NewTable("T_UTLANDDATA", "T_UTLANDFLAG", "T_ELEM_OBS").SetImportYear(2006), // Section 3: tables that should only be dumped - "T_10MINUTE_DATA": NewTable("T_10MINUTE_DATA", "T_10MINUTE_FLAG", "T_ELEM_OBS"), + "T_10MINUTE_DATA": NewTable("T_10MINUTE_DATA", "T_10MINUTE_FLAG", "T_ELEM_OBS").SetDumpFunc(dumpByYear), "T_ADATA_LEVEL": NewTable("T_ADATA_LEVEL", "T_AFLAG_LEVEL", "T_ELEM_OBS"), - "T_MINUTE_DATA": NewTable("T_MINUTE_DATA", "T_MINUTE_FLAG", "T_ELEM_OBS"), - "T_SECOND_DATA": NewTable("T_SECOND_DATA", "T_SECOND_FLAG", "T_ELEM_OBS"), + "T_MINUTE_DATA": NewTable("T_MINUTE_DATA", "T_MINUTE_FLAG", "T_ELEM_OBS").SetDumpFunc(dumpByYear), + "T_SECOND_DATA": NewTable("T_SECOND_DATA", "T_SECOND_FLAG", "T_ELEM_OBS").SetDumpFunc(dumpByYear), "T_CDCV_DATA": NewTable("T_CDCV_DATA", "T_CDCV_FLAG", "T_ELEM_EDATA"), "T_MERMAID": NewTable("T_MERMAID", "T_MERMAID_FLAG", "T_ELEM_EDATA"), "T_SVVDATA": NewTable("T_SVVDATA", "T_SVVFLAG", 
"T_ELEM_OBS"), // Section 4: special cases, namely digitized historical data - "T_MONTH": NewTable("T_MONTH", "T_MONTH_FLAG", "T_ELEM_MONTH").SetImportYear(1957), - "T_DIURNAL": NewTable("T_DIURNAL", "T_DIURNAL_FLAG", "T_ELEM_DIURNAL").SetImportYear(2006), - "T_HOMOGEN_DIURNAL": NewTable("T_HOMOGEN_DIURNAL", "", "T_ELEM_HOMOGEN_MONTH"), - "T_HOMOGEN_MONTH": NewTable("T_HOMOGEN_MONTH", "T_ELEM_HOMOGEN_MONTH", ""), + // TODO: I don't think we want to import these, they are products + "T_MONTH": NewTable("T_MONTH", "T_MONTH_FLAG", "T_ELEM_MONTH").SetConvertFunc(convertProduct).SetImportYear(1957), + "T_DIURNAL": NewTable("T_DIURNAL", "T_DIURNAL_FLAG", "T_ELEM_DIURNAL").SetConvertFunc(convertProduct).SetImportYear(2006), + "T_HOMOGEN_DIURNAL": NewTable("T_HOMOGEN_DIURNAL", "", "T_ELEM_HOMOGEN_MONTH").SetDumpFunc(dumpDataOnly).SetConvertFunc(convertProduct), + "T_HOMOGEN_MONTH": NewTable("T_HOMOGEN_MONTH", "T_ELEM_HOMOGEN_MONTH", "").SetDumpFunc(dumpHomogenMonth).SetConvertFunc(convertProduct), // Section 5: tables missing in the KDVH proxy: // 1. these exist in a separate database "T_AVINOR": NewTable("T_AVINOR", "T_AVINOR_FLAG", "T_ELEM_OBS"), "T_PROJDATA": NewTable("T_PROJDATA", "T_PROJFLAG", "T_ELEM_PROJ"), // 2. these are not in active use and don't need to be imported in LARD - "T_DIURNAL_INTERPOLATED": NewTable("T_DIURNAL_INTERPOLATED", "", ""), + "T_DIURNAL_INTERPOLATED": NewTable("T_DIURNAL_INTERPOLATED", "", "").SetConvertFunc(convertDiurnalInterpolated), "T_MONTH_INTERPOLATED": NewTable("T_MONTH_INTERPOLATED", "", ""), }} } + +// Struct that represent an observation in KDVH +type KdvhObs struct { + Obstime time.Time + Data string + Flags string +} + +// Convenience struct that holds information for a specific timeseries +type TsInfo struct { + Id int32 + Station int32 + Element string + Offset period.Period + Param stinfosys.Param + Timespan utils.TimeSpan + Logstr string +} diff --git a/migrations/kdvh/db/table.go b/migrations/kdvh/db/table.go index a1b9b787..aa28dc29 100644 --- a/migrations/kdvh/db/table.go +++ b/migrations/kdvh/db/table.go @@ -1,5 +1,11 @@ package db +import ( + "github.com/jackc/pgx/v5/pgxpool" + + "migrate/lard" +) + // In KDVH for each table name we usually have three separate tables: // 1. A DATA table containing observation values; // 2. A FLAG table containing quality control (QC) flags; @@ -22,6 +28,8 @@ type Table struct { ElemTableName string // Name of the ELEM table Path string // Directory name of where the dumped table is stored importUntil int // Import data only until the year specified by this field. Table import will be skipped, if `SetImportYear` is not called. 
+ DumpFn DumpFunction + Convert ConvertFunction } // Creates default Table @@ -31,10 +39,42 @@ func NewTable(data, flag, elem string) *Table { FlagTableName: flag, ElemTableName: elem, // NOTE: '_combined' kept for backward compatibility with original scripts - Path: data + "_combined", + Path: data + "_combined", + DumpFn: dumpDataAndFlags, + Convert: convert, } } +// Function used to dump the KDVH table, see below +type DumpFunction func(path string, args dumpArgs, logStr string, overwrite bool, pool *pgxpool.Pool) error +type dumpArgs struct { + element string + station string + dataTable string + flagTable string +} + +// The following ConvertFunctions try to recover the original pair of `controlinfo` +// and `useinfo` generated by Kvalobs for an observation, based on `Obs.Flags` and `Obs.Data` +// Different KDVH tables need different ways to perform this conversion (defined in CONV_MAP). +// +// It returns three structs for each of the lard tables we are inserting into +type ConvertFunction func(*KdvhObs, *TsInfo) (lard.DataObs, lard.TextObs, lard.Flag, error) + +func (t *Table) Dump(path, element, station, logStr string, overwrite bool, pool *pgxpool.Pool) error { + return t.DumpFn(path, dumpArgs{element, station, t.TableName, t.FlagTableName}, logStr, overwrite, pool) +} + +func (t *Table) SetDumpFunc(fn DumpFunction) *Table { + t.DumpFn = fn + return t +} + +func (t *Table) SetConvertFunc(fn ConvertFunction) *Table { + t.Convert = fn + return t +} + // Specify the year until data should be imported func (t *Table) SetImportYear(year int) *Table { if year > 0 { diff --git a/migrations/kdvh/dump/dump.go b/migrations/kdvh/dump/dump.go index 86e2f36e..88ac0f7d 100644 --- a/migrations/kdvh/dump/dump.go +++ b/migrations/kdvh/dump/dump.go @@ -38,8 +38,6 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { return } - dumpFunc := getDumpFunc(table) - // Used to limit connections to the database semaphore := make(chan struct{}, config.MaxConn) @@ -65,19 +63,11 @@ func DumpTable(table *db.Table, pool *pgxpool.Pool, config *Config) { wg.Done() }() - err := dumpFunc( - path, - DumpArgs{ - element: element, - station: station, - dataTable: table.TableName, - flagTable: table.FlagTableName, - overwrite: config.Overwrite, - }, - pool, - ) + logStr := fmt.Sprintf("%s - %s - %s: ", table.TableName, station, element) + + err := table.Dump(path, element, station, logStr, config.Overwrite, pool) if err == nil { - slog.Info(fmt.Sprintf("%s - %s - %s: dumped successfully", table.TableName, station, element)) + slog.Info(logStr + "dumped successfully") } // Release semaphore diff --git a/migrations/kdvh/dump/write.go b/migrations/kdvh/dump/write.go deleted file mode 100644 index 5e4aec9d..00000000 --- a/migrations/kdvh/dump/write.go +++ /dev/null @@ -1,89 +0,0 @@ -package dump - -import ( - "database/sql" - "encoding/csv" - "errors" - "fmt" - "io" - "os" - "slices" - "time" - - "github.com/jackc/pgx/v5" -) - -// Format string for date field in CSV files -const TIMEFORMAT string = "2006-01-02_15:04:05" - -// Error returned if no observations are found for a (station, element) pair -var EMPTY_QUERY_ERR error = errors.New("The query did not return any rows") - -// Struct representing a single record in the output CSV file -type Record struct { - Time time.Time `db:"time"` - Data sql.NullString `db:"data"` - Flag sql.NullString `db:"flag"` -} - -// Dumps queried rows to file -func writeToCsv(filename string, rows pgx.Rows) error { - lines, err := sortRows(rows) - if err != nil { - return 
err - } - - // Return if query was empty - if len(lines) == 0 { - return EMPTY_QUERY_ERR - } - - file, err := os.Create(filename) - if err != nil { - return err - } - - err = writeElementFile(lines, file) - if closeErr := file.Close(); closeErr != nil { - return errors.Join(err, closeErr) - } - return err -} - -// Scans the rows and collects them in a slice of chronologically sorted lines -func sortRows(rows pgx.Rows) ([]Record, error) { - defer rows.Close() - - records, err := pgx.CollectRows(rows, pgx.RowToStructByName[Record]) - if err != nil { - return nil, errors.New("Could not collect rows: " + err.Error()) - } - - slices.SortFunc(records, func(a, b Record) int { - return a.Time.Compare(b.Time) - }) - - return records, rows.Err() -} - -// Writes queried (time | data | flag) columns to CSV -func writeElementFile(lines []Record, file io.Writer) error { - // Write number of lines as header - file.Write([]byte(fmt.Sprintf("%v\n", len(lines)))) - - writer := csv.NewWriter(file) - - record := make([]string, 3) - for _, l := range lines { - record[0] = l.Time.Format(TIMEFORMAT) - record[1] = l.Data.String - record[2] = l.Flag.String - - if err := writer.Write(record); err != nil { - return errors.New("Could not write to file: " + err.Error()) - } - } - - writer.Flush() - return writer.Error() -} diff --git a/migrations/kdvh/import/cache/main.go b/migrations/kdvh/import/cache/main.go index 81767a4c..7613f013 100644 --- a/migrations/kdvh/import/cache/main.go +++ b/migrations/kdvh/import/cache/main.go @@ -6,7 +6,6 @@ import ( "log/slog" "github.com/jackc/pgx/v5/pgxpool" - "github.com/rickb777/period" kdvh "migrate/kdvh/db" "migrate/lard" @@ -35,18 +34,7 @@ func CacheMetadata(tables, stations, elements []string, database *kdvh.KDVH) *Ca } } -// Convenience struct that holds information for a specific timeseries -type TsInfo struct { - Id int32 - Station int32 - Element string - Offset period.Period - Param stinfosys.Param - Timespan utils.TimeSpan - Logstr string -} - -func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpool.Pool) (*TsInfo, error) { +func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpool.Pool) (*kdvh.TsInfo, error) { logstr := fmt.Sprintf("[%v - %v - %v]: ", table, station, element) key := newKDVHKey(element, table, station) @@ -86,7 +74,7 @@ func (cache *Cache) NewTsInfo(table, element string, station int32, pool *pgxpoo return nil, err } - return &TsInfo{ + return &kdvh.TsInfo{ Id: tsid, Station: station, Element: element, diff --git a/migrations/kdvh/import/import.go b/migrations/kdvh/import/import.go index 72627c9b..36f3729c 100644 --- a/migrations/kdvh/import/import.go +++ b/migrations/kdvh/import/import.go @@ -34,8 +34,6 @@ func ImportTable(table *kdvh.Table, cache *cache.Cache, pool *pgxpool.Pool, conf return 0 } - convFunc := getConvertFunc(table) - for _, station := range stations { stnr, err := getStationNumber(station, config.Stations) if err != nil { @@ -78,7 +76,7 @@ func ImportTable(table *kdvh.Table, cache *cache.Cache, pool *pgxpool.Pool, conf } filename := filepath.Join(stationDir, element.Name()) - data, text, flag, err := parseData(filename, tsInfo, convFunc, table, config) + data, text, flag, err := parseData(filename, tsInfo, table, config) if err != nil { return } @@ -150,7 +148,7 @@ func getElementCode(element os.DirEntry, elementList []string) (string, error) { // Parses the observations in the CSV file, converts them with the table // ConvertFunction and returns three arrays that can be passed to 
pgx.CopyFromRows -func parseData(filename string, tsInfo *cache.TsInfo, convFunc ConvertFunction, table *kdvh.Table, config *Config) ([][]any, [][]any, [][]any, error) { +func parseData(filename string, tsInfo *kdvh.TsInfo, table *kdvh.Table, config *Config) ([][]any, [][]any, [][]any, error) { file, err := os.Open(filename) if err != nil { slog.Warn(err.Error()) @@ -190,7 +188,8 @@ func parseData(filename string, tsInfo *cache.TsInfo, convFunc ConvertFunction, break } - dataRow, textRow, flagRow, err := convFunc(KdvhObs{tsInfo, obsTime, cols[1], cols[2]}) + obs := kdvh.KdvhObs{Obstime: obsTime, Data: cols[1], Flags: cols[2]} + dataRow, textRow, flagRow, err := table.Convert(&obs, tsInfo) if err != nil { return nil, nil, nil, err } diff --git a/migrations/kdvh/import/import_test.go b/migrations/kdvh/import/import_test.go deleted file mode 100644 index d5f8eafb..00000000 --- a/migrations/kdvh/import/import_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package port - -import "testing" - -func TestFlagsAreValid(t *testing.T) { - type testCase struct { - input KdvhObs - expected bool - } - - cases := []testCase{ - {KdvhObs{flags: "12309"}, true}, - {KdvhObs{flags: "984.3"}, false}, - {KdvhObs{flags: ".1111"}, false}, - {KdvhObs{flags: "1234."}, false}, - {KdvhObs{flags: "12.2.4"}, false}, - {KdvhObs{flags: "12.343"}, false}, - {KdvhObs{flags: ""}, false}, - {KdvhObs{flags: "asdas"}, false}, - {KdvhObs{flags: "12a3a"}, false}, - {KdvhObs{flags: "1sdfl"}, false}, - } - - for _, c := range cases { - t.Log("Testing flag:", c.input.flags) - - if result := c.input.flagsAreValid(); result != c.expected { - t.Errorf("Got %v, wanted %v", result, c.expected) - } - } -} From 3b28f9621fbe9fa3d97a4eb905ee822b88461ce0 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 9 Dec 2024 11:03:28 +0100 Subject: [PATCH 59/67] Rework kvalobs structure --- migrations/kvalobs/check/main.go | 12 +- migrations/kvalobs/db/base_config.go | 2 +- .../{import/data.go => db/csv_parsers.go} | 134 +++++++++--------- migrations/kvalobs/db/import_functions.go | 116 +++++++++++++++ migrations/kvalobs/db/{labels.go => label.go} | 0 migrations/kvalobs/db/label_dump_functions.go | 63 ++++++++ migrations/kvalobs/db/main.go | 16 ++- .../kvalobs/db/series_dump_functions.go | 107 ++++++++++++++ migrations/kvalobs/db/table.go | 4 +- migrations/kvalobs/dump/data.go | 91 ------------ migrations/kvalobs/dump/dump.go | 34 +---- migrations/kvalobs/dump/main.go | 15 +- migrations/kvalobs/dump/text.go | 78 ---------- migrations/kvalobs/import/cache/main.go | 1 - migrations/kvalobs/import/import.go | 18 +-- migrations/kvalobs/import/main.go | 24 ++-- migrations/kvalobs/import/text.go | 130 ----------------- .../18700/18700_1000_316__.csv | 0 migrations/tests/kvalobs_test.go | 23 +-- 19 files changed, 412 insertions(+), 456 deletions(-) rename migrations/kvalobs/{import/data.go => db/csv_parsers.go} (56%) create mode 100644 migrations/kvalobs/db/import_functions.go rename migrations/kvalobs/db/{labels.go => label.go} (100%) create mode 100644 migrations/kvalobs/db/label_dump_functions.go create mode 100644 migrations/kvalobs/db/series_dump_functions.go delete mode 100644 migrations/kvalobs/dump/data.go delete mode 100644 migrations/kvalobs/dump/text.go delete mode 100644 migrations/kvalobs/import/text.go rename migrations/tests/files/kvalobs/{text => text_data}/18700/18700_1000_316__.csv (100%) diff --git a/migrations/kvalobs/check/main.go b/migrations/kvalobs/check/main.go index 9ca90fbc..0370e837 100644 --- a/migrations/kvalobs/check/main.go +++ 
b/migrations/kvalobs/check/main.go @@ -23,17 +23,19 @@ type Config struct { } func (c *Config) Execute() { - kvalobs, histkvalobs := db.InitDBs() + dbs := db.InitDBs() if utils.IsEmptyOrEqual(c.CheckName, "overlap") { fmt.Println("Checking if some param IDs are stored in both the `data` and `text_data` tables") - c.checkDataAndTextParamsOverlap(&kvalobs) - c.checkDataAndTextParamsOverlap(&histkvalobs) + for _, db := range dbs { + c.checkDataAndTextParamsOverlap(&db) + } } if utils.IsEmptyOrEqual(c.CheckName, "non-scalars") { fmt.Println("Checking if param IDs in `text_data` match non-scalar parameters in Stinfosys") stinfoParams := getStinfoNonScalars() - c.checkNonScalars(&kvalobs, stinfoParams) - c.checkNonScalars(&histkvalobs, stinfoParams) + for _, db := range dbs { + c.checkNonScalars(&db, stinfoParams) + } } } diff --git a/migrations/kvalobs/db/base_config.go b/migrations/kvalobs/db/base_config.go index 8a301ac5..544ca68f 100644 --- a/migrations/kvalobs/db/base_config.go +++ b/migrations/kvalobs/db/base_config.go @@ -15,7 +15,7 @@ type BaseConfig struct { FromTime *utils.Timestamp `arg:"--from" help:"Fetch data only starting from this date-only timestamp"` ToTime *utils.Timestamp `arg:"--to" help:"Fetch data only until this date-only timestamp"` Database string `arg:"--db" help:"Which database to process, all by default. Choices: ['kvalobs', 'histkvalobs']"` - Table string `help:"Which table to process, all by default. Choices: ['data', 'text']"` + Table string `help:"Which table to process, all by default. Choices: ['data', 'text_data']"` Stations []int32 `help:"Optional space separated list of station numbers"` TypeIds []int32 `help:"Optional space separated list of type IDs"` ParamIds []int32 `help:"Optional space separated list of param IDs"` diff --git a/migrations/kvalobs/import/data.go b/migrations/kvalobs/db/csv_parsers.go similarity index 56% rename from migrations/kvalobs/import/data.go rename to migrations/kvalobs/db/csv_parsers.go index 75f44ad2..bf2f774d 100644 --- a/migrations/kvalobs/import/data.go +++ b/migrations/kvalobs/db/csv_parsers.go @@ -1,76 +1,15 @@ -package port +package db import ( "bufio" - "log/slog" - "os" - "path/filepath" + "migrate/lard" "slices" "strconv" "strings" "time" - - kvalobs "migrate/kvalobs/db" - "migrate/lard" - - "github.com/jackc/pgx/v5/pgxpool" ) -// Returns a DataTable for import -func DataTable(path string) kvalobs.Table { - return kvalobs.Table{ - Path: filepath.Join(path, kvalobs.DATA_TABLE_NAME), - Import: importData, - } -} - -func importData(tsid int32, label *kvalobs.Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - - // Parse number of rows - scanner.Scan() - rowCount, _ := strconv.Atoi(scanner.Text()) - - // Skip header - scanner.Scan() - - if label.IsSpecialCloudType() { - text, err := parseSpecialCloudType(tsid, rowCount, scanner) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - - count, err := lard.InsertTextData(text, pool, logStr) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - - return count, nil - } - - data, flags, err := parseDataCSV(tsid, rowCount, scanner) - count, err := lard.InsertData(data, pool, logStr) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - - if err := lard.InsertFlags(flags, pool, logStr); err != nil { - slog.Error(logStr + err.Error()) - 
return 0, err - } - - return count, nil -} +// Here we define how to parse dumped CSV depending on the table func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [][]any, error) { data := make([][]any, 0, rowCount) @@ -100,10 +39,10 @@ func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [] corrected := float32(corrected64) // Filter out special values that in Kvalobs stand for null observations - if !slices.Contains(kvalobs.NULL_VALUES, original) { + if !slices.Contains(NULL_VALUES, original) { originalPtr = &original } - if !slices.Contains(kvalobs.NULL_VALUES, corrected) { + if !slices.Contains(NULL_VALUES, corrected) { correctedPtr = &corrected } @@ -124,8 +63,8 @@ func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [] Obstime: obstime, Original: originalPtr, Corrected: correctedPtr, - Controlinfo: &fields[4], // Never null, has default values in KValobs - Useinfo: &fields[5], // Never null, has default values in KValobs + Controlinfo: &fields[4], // Never null, has default value in Kvalobs + Useinfo: &fields[5], // Never null, has default value in Kvalobs Cfailed: cfailed, } @@ -136,6 +75,65 @@ func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [] return data, flags, nil } +// Text obs are not flagged +func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { + data := make([][]any, 0, rowCount) + for scanner.Scan() { + // obstime, original, tbtime + fields := strings.Split(scanner.Text(), ",") + + obstime, err := time.Parse(time.RFC3339, fields[0]) + if err != nil { + return nil, err + } + + lardObs := lard.TextObs{ + Id: tsid, + Obstime: obstime, + Text: &fields[1], + } + + data = append(data, lardObs.ToRow()) + } + + return data, nil +} + +// Function for paramids 2751, 2752, 2753, 2754 that were stored as text data +// but should instead be treated as scalars +// TODO: I'm not sure these params should be scalars given that the other cloud types are not. +// Should all cloud types be integers? +func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { + data := make([][]any, 0, rowCount) + for scanner.Scan() { + // obstime, original, tbtime + fields := strings.Split(scanner.Text(), ",") + + obstime, err := time.Parse(time.RFC3339, fields[0]) + if err != nil { + return nil, err + } + + val, err := strconv.ParseFloat(fields[1], 32) + if err != nil { + return nil, err + } + + original := float32(val) + lardObs := lard.DataObs{ + Id: tsid, + Obstime: obstime, + Data: &original, + } + + data = append(data, lardObs.ToRow()) + } + + // TODO: Original text obs were not flagged, so we don't return a flags? + // Or should we return default values? 
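+	// For now only (id, obstime, data) rows are built here, so these
+	// converted series end up in LARD without accompanying flag rows.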
+ return data, nil +} + // Function for paramids 305, 306, 307, 308 that were stored as scalar data // but should be treated as text func parseSpecialCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { diff --git a/migrations/kvalobs/db/import_functions.go b/migrations/kvalobs/db/import_functions.go new file mode 100644 index 00000000..09f28509 --- /dev/null +++ b/migrations/kvalobs/db/import_functions.go @@ -0,0 +1,116 @@ +package db + +import ( + "bufio" + "log/slog" + "migrate/lard" + "os" + "strconv" + + "github.com/jackc/pgx/v5/pgxpool" +) + +// Here are implemented the `ImportFunc` stored inside the Table struct + +// NOTE: +// - for both kvalobs and histkvalobs: +// - all stinfo non-scalar params that can be found in Kvalobs are stored in `text_data` +// - 305, 306, 307, 308 are also in `data` but should be treated as `text_data` +// => should always use readDataCSV and lard.InsertData for these +// - only for histkvalobs +// - 2751, 2752, 2753, 2754 are in `text_data` but should be treated as `data`? + +func importData(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + + // Parse number of rows + scanner.Scan() + rowCount, _ := strconv.Atoi(scanner.Text()) + + // Skip header + scanner.Scan() + + if label.IsSpecialCloudType() { + text, err := parseSpecialCloudType(tsid, rowCount, scanner) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + count, err := lard.InsertTextData(text, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil + } + + data, flags, err := parseDataCSV(tsid, rowCount, scanner) + count, err := lard.InsertData(data, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + if err := lard.InsertFlags(flags, pool, logStr); err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil +} + +func importText(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { + file, err := os.Open(filename) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + + // Parse number of rows + scanner.Scan() + rowCount, _ := strconv.Atoi(scanner.Text()) + + // Skip header + scanner.Scan() + + if label.IsMetarCloudType() { + data, err := parseMetarCloudType(tsid, rowCount, scanner) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + count, err := lard.InsertData(data, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil + } + + text, err := parseTextCSV(tsid, rowCount, scanner) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + count, err := lard.InsertTextData(text, pool, logStr) + if err != nil { + slog.Error(logStr + err.Error()) + return 0, err + } + + return count, nil +} diff --git a/migrations/kvalobs/db/labels.go b/migrations/kvalobs/db/label.go similarity index 100% rename from migrations/kvalobs/db/labels.go rename to migrations/kvalobs/db/label.go diff --git a/migrations/kvalobs/db/label_dump_functions.go b/migrations/kvalobs/db/label_dump_functions.go new file mode 100644 index 00000000..34827ea6 --- /dev/null +++ b/migrations/kvalobs/db/label_dump_functions.go @@ -0,0 +1,63 @@ +package db + +import 
( + "context" + "log/slog" + "migrate/utils" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Here are implemented the `LabelDumpFunc` stored inside the Table struct + +func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { + query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level + FROM data + WHERE ($1::timestamp IS NULL OR obstime >= $1) + AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid` + + slog.Info("Querying data labels...") + rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + slog.Info("Collecting data labels...") + labels := make([]*Label, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[Label]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return labels, nil +} + +func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { + // NOTE: `param` table is empty in histkvalobs + query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level + FROM text_data + WHERE ($1::timestamp IS NULL OR obstime >= $1) + AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid` + + slog.Info("Querying text labels...") + rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + slog.Info("Collecting text labels...") + labels := make([]*Label, 0, rows.CommandTag().RowsAffected()) + labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[Label]) + if err != nil { + slog.Error(err.Error()) + return nil, err + } + + return labels, nil +} diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 50d42701..2383245c 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -135,13 +135,19 @@ type TextObs struct { // Basic Metadata for a Kvalobs database type DB struct { Name string - Path string ConnEnvVar string + Tables map[string]*Table } // Returns two `DB` structs with metadata for the prod and hist databases -func InitDBs() (DB, DB) { - kvalobs := DB{Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING"} - histkvalobs := DB{Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING"} - return kvalobs, histkvalobs +func InitDBs() map[string]DB { + tables := map[string]*Table{ + "data": {Name: "data", DumpLabels: dumpDataLabels, DumpSeries: dumpDataSeries, Import: importData}, + "text_data": {Name: "text_data", DumpLabels: dumpTextLabels, DumpSeries: dumpTextSeries, Import: importText}, + } + + return map[string]DB{ + "kvalobs": {Name: "kvalobs", ConnEnvVar: "KVALOBS_CONN_STRING", Tables: tables}, + "histkvalobs": {Name: "histkvalobs", ConnEnvVar: "HISTKVALOBS_CONN_STRING", Tables: tables}, + } } diff --git a/migrations/kvalobs/db/series_dump_functions.go b/migrations/kvalobs/db/series_dump_functions.go new file mode 100644 index 00000000..6f886ba3 --- /dev/null +++ b/migrations/kvalobs/db/series_dump_functions.go @@ -0,0 +1,107 @@ +package db + +import ( + "context" + "fmt" + "log/slog" + "migrate/utils" + "os" + "path/filepath" + + "github.com/gocarina/gocsv" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Here are implemented the `ObsDumpFunc` stored inside the Table struct + +func dumpDataSeries(label *Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { + // NOTE: sensor and level could be 
NULL, but in reality they have default values + query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed + FROM data + WHERE stationid = $1 + AND typeid = $2 + AND paramid = $3 + AND sensor = $4 + AND level = $5 + AND ($6::timestamp IS NULL OR obstime >= $6) + AND ($7::timestamp IS NULL OR obstime < $7) + ORDER BY obstime` + + // Convert to string because `sensor` in Kvalobs is a BPCHAR(1) + var sensor *string + if label.Sensor != nil { + sensorval := fmt.Sprint(*label.Sensor) + sensor = &sensorval + } + + rows, err := pool.Query( + context.TODO(), + query, + label.StationID, + label.TypeID, + label.ParamID, + sensor, + label.Level, + timespan.From, + timespan.To, + ) + if err != nil { + return err + } + + data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[DataObs]) + if err != nil { + return err + } + + return writeSeriesCSV(data, path, label) +} + +func dumpTextSeries(label *Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { + query := `SELECT obstime, original, tbtime FROM text_data + WHERE stationid = $1 + AND typeid = $2 + AND paramid = $3 + AND ($4::timestamp IS NULL OR obstime >= $4) + AND ($5::timestamp IS NULL OR obstime < $5) + ORDER BY obstime` + + rows, err := pool.Query( + context.TODO(), + query, + label.StationID, + label.TypeID, + label.ParamID, + timespan.From, + timespan.To, + ) + if err != nil { + return err + } + + data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[TextObs]) + if err != nil { + return err + } + + return writeSeriesCSV(data, path, label) +} + +func writeSeriesCSV[S DataSeries | TextSeries](series S, path string, label *Label) error { + filename := filepath.Join(path, label.ToFilename()) + file, err := os.Create(filename) + if err != nil { + slog.Error(err.Error()) + return err + } + + // Write number of lines on first line, keep headers on 2nd line + file.Write([]byte(fmt.Sprintf("%v\n", len(series)))) + if err = gocsv.Marshal(series, file); err != nil { + slog.Error(err.Error()) + return err + } + + return nil +} diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go index 959532a2..a9552e31 100644 --- a/migrations/kvalobs/db/table.go +++ b/migrations/kvalobs/db/table.go @@ -8,6 +8,7 @@ import ( // Maps to `data` and `text_data` tables in Kvalobs type Table struct { + Name string Path string // Path of the dumped table DumpLabels LabelDumpFunc // Function that dumps labels from the table DumpSeries ObsDumpFunc // Function that dumps observations from the table @@ -22,6 +23,3 @@ type ObsDumpFunc func(label *Label, timespan *utils.TimeSpan, path string, pool // Lard Import function type ImportFunc func(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) - -// How to read dumped CSV, returns one array for observations and one for flags -type ReadCSVFunc func(tsid int32, label *Label, filename string) ([][]any, [][]any, error) diff --git a/migrations/kvalobs/dump/data.go b/migrations/kvalobs/dump/data.go deleted file mode 100644 index 9e97aca1..00000000 --- a/migrations/kvalobs/dump/data.go +++ /dev/null @@ -1,91 +0,0 @@ -package dump - -import ( - "context" - "fmt" - "log/slog" - "path/filepath" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - - kvalobs "migrate/kvalobs/db" - "migrate/utils" -) - -// Returns a DataTable for dump -func DataTable(path string) kvalobs.Table { - return kvalobs.Table{ - Path: filepath.Join(path, kvalobs.DATA_TABLE_NAME), - DumpLabels: dumpDataLabels, - DumpSeries: dumpDataSeries, 
- } -} - -func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*kvalobs.Label, error) { - query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level - FROM data - WHERE ($1::timestamp IS NULL OR obstime >= $1) - AND ($2::timestamp IS NULL OR obstime < $2) - ORDER BY stationid` - - slog.Info("Querying data labels...") - rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - slog.Info("Collecting data labels...") - labels := make([]*kvalobs.Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[kvalobs.Label]) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return labels, nil -} - -func dumpDataSeries(label *kvalobs.Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { - // NOTE: sensor and level could be NULL, but in reality they have default values - query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed - FROM data - WHERE stationid = $1 - AND typeid = $2 - AND paramid = $3 - AND sensor = $4 - AND level = $5 - AND ($6::timestamp IS NULL OR obstime >= $6) - AND ($7::timestamp IS NULL OR obstime < $7) - ORDER BY obstime` - - // Convert to string because `sensor` in Kvalobs is a BPCHAR(1) - var sensor *string - if label.Sensor != nil { - sensorval := fmt.Sprint(*label.Sensor) - sensor = &sensorval - } - - rows, err := pool.Query( - context.TODO(), - query, - label.StationID, - label.TypeID, - label.ParamID, - sensor, - label.Level, - timespan.From, - timespan.To, - ) - if err != nil { - return err - } - - data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[kvalobs.DataObs]) - if err != nil { - return err - } - - return writeSeriesCSV(data, path, label) -} diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 252fab41..d1084573 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -9,32 +9,13 @@ import ( "strings" "sync" - "github.com/gocarina/gocsv" "github.com/jackc/pgx/v5/pgxpool" kvalobs "migrate/kvalobs/db" "migrate/utils" ) -func writeSeriesCSV[S kvalobs.DataSeries | kvalobs.TextSeries](series S, path string, label *kvalobs.Label) error { - filename := filepath.Join(path, label.ToFilename()) - file, err := os.Create(filename) - if err != nil { - slog.Error(err.Error()) - return err - } - - // Write number of lines on first line, keep headers on 2nd line - file.Write([]byte(fmt.Sprintf("%v\n", len(series)))) - if err = gocsv.Marshal(series, file); err != nil { - slog.Error(err.Error()) - return err - } - - return nil -} - -func getLabels(table kvalobs.Table, pool *pgxpool.Pool, config *Config) (labels []*kvalobs.Label, err error) { +func getLabels(table *kvalobs.Table, pool *pgxpool.Pool, config *Config) (labels []*kvalobs.Label, err error) { labelFile := table.Path + "_labels.csv" if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { @@ -57,7 +38,7 @@ func getStationLabelMap(labels []*kvalobs.Label) map[int32][]*kvalobs.Label { return labelmap } -func dumpTable(table kvalobs.Table, pool *pgxpool.Pool, config *Config) { +func dumpTable(table *kvalobs.Table, pool *pgxpool.Pool, config *Config) { if !config.LabelsOnly { utils.SetLogFile(table.Path, "dump") } @@ -136,13 +117,12 @@ func dumpDB(database kvalobs.DB, config *Config) { return } - if utils.IsEmptyOrEqual(config.Table, kvalobs.DATA_TABLE_NAME) { - table := DataTable(path) - dumpTable(table, 
pool, config) - } + for name, table := range database.Tables { + if !utils.IsEmptyOrEqual(config.Table, name) { + continue + } - if utils.IsEmptyOrEqual(config.Table, kvalobs.TEXT_TABLE_NAME) { - table := TextTable(path) + table.Path = filepath.Join(path, table.Name) dumpTable(table, pool, config) } } diff --git a/migrations/kvalobs/dump/main.go b/migrations/kvalobs/dump/main.go index 7d800063..bb7041ab 100644 --- a/migrations/kvalobs/dump/main.go +++ b/migrations/kvalobs/dump/main.go @@ -17,14 +17,11 @@ type Config struct { } func (config *Config) Execute() { - kvalobs, histkvalobs := db.InitDBs() - // tables := []*db.Table{} - - if utils.IsEmptyOrEqual(config.Database, kvalobs.Name) { - dumpDB(kvalobs, config) - } - - if utils.IsEmptyOrEqual(config.Database, histkvalobs.Name) { - dumpDB(histkvalobs, config) + dbs := db.InitDBs() + for name, db := range dbs { + if !utils.IsEmptyOrEqual(config.Database, name) { + continue + } + dumpDB(db, config) } } diff --git a/migrations/kvalobs/dump/text.go b/migrations/kvalobs/dump/text.go deleted file mode 100644 index c2bb49a2..00000000 --- a/migrations/kvalobs/dump/text.go +++ /dev/null @@ -1,78 +0,0 @@ -package dump - -import ( - "context" - "log/slog" - "path/filepath" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - - "migrate/kvalobs/db" - "migrate/utils" -) - -// Returns a TextTable for dump -func TextTable(path string) db.Table { - return db.Table{ - Path: filepath.Join(path, db.TEXT_TABLE_NAME), - DumpLabels: dumpTextLabels, - DumpSeries: dumpTextSeries, - } -} - -func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*db.Label, error) { - // NOTE: `param` table is empty in histkvalobs - query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level - FROM text_data - WHERE ($1::timestamp IS NULL OR obstime >= $1) - AND ($2::timestamp IS NULL OR obstime < $2) - ORDER BY stationid` - - slog.Info("Querying text labels...") - rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - slog.Info("Collecting text labels...") - labels := make([]*db.Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[db.Label]) - if err != nil { - slog.Error(err.Error()) - return nil, err - } - - return labels, nil -} - -func dumpTextSeries(label *db.Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { - query := `SELECT obstime, original, tbtime FROM text_data - WHERE stationid = $1 - AND typeid = $2 - AND paramid = $3 - AND ($4::timestamp IS NULL OR obstime >= $4) - AND ($5::timestamp IS NULL OR obstime < $5) - ORDER BY obstime` - - rows, err := pool.Query( - context.TODO(), - query, - label.StationID, - label.TypeID, - label.ParamID, - timespan.From, - timespan.To, - ) - if err != nil { - return err - } - - data, err := pgx.CollectRows(rows, pgx.RowToAddrOfStructByName[db.TextObs]) - if err != nil { - return err - } - - return writeSeriesCSV(data, path, label) -} diff --git a/migrations/kvalobs/import/cache/main.go b/migrations/kvalobs/import/cache/main.go index cc3c4a7c..c5f4ebba 100644 --- a/migrations/kvalobs/import/cache/main.go +++ b/migrations/kvalobs/import/cache/main.go @@ -27,7 +27,6 @@ func New(kvalobs db.DB) *Cache { defer conn.Close(ctx) permits := stinfosys.NewPermitTables(conn) - // params := stinfosys.GetParamScalarMap(conn) // timeseries := timespans := cacheKvalobsTimeseriesTimespans(kvalobs) diff --git 
a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 4fa0d80e..701a9da4 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -17,7 +17,7 @@ import ( "migrate/utils" ) -func ImportTable(table kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { +func ImportTable(table *kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, config *Config) (int64, error) { fmt.Printf("Importing from %q...\n", table.Path) defer fmt.Println(strings.Repeat("- ", 40)) @@ -104,22 +104,16 @@ func ImportTable(table kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, co return rowsInserted, nil } -// TODO: while importing we trust that kvalobs and stinfosys have the same -// non scalar parameters, which might not be the case func ImportDB(database kvalobs.DB, cache *cache.Cache, pool *pgxpool.Pool, config *Config) { path := filepath.Join(config.Path, database.Name) - if utils.IsEmptyOrEqual(config.Table, kvalobs.DATA_TABLE_NAME) { - table := DataTable(path) - utils.SetLogFile(table.Path, "import") - - ImportTable(table, cache, pool, config) - } + for name, table := range database.Tables { + if !utils.IsEmptyOrEqual(config.Table, name) { + continue + } - if utils.IsEmptyOrEqual(config.Table, kvalobs.TEXT_TABLE_NAME) { - table := TextTable(path) + table.Path = filepath.Join(path, table.Name) utils.SetLogFile(table.Path, "import") - ImportTable(table, cache, pool, config) } } diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index b3a4bf07..85d9100d 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -14,23 +14,15 @@ import ( "migrate/utils" ) -// NOTE: -// - for both kvalobs and histkvalobs: -// - all stinfo non-scalar params that can be found in Kvalobs are stored in `text_data` -// - 305, 306, 307, 308 are also in `data` but should be treated as `text_data` -// => should always use readDataCSV and lard.InsertData for these -// - only for histkvalobs -// - 2751, 2752, 2753, 2754 are in `text_data` but should be treated as `data`? -// => These are more complicated, but probably we should - type Config struct { kvalobs.BaseConfig Reindex bool `help:"Drop PG indices before insertion. Might improve performance"` } func (config *Config) Execute() error { - prod, hist := kvalobs.InitDBs() - cache := cache.New(prod) + dbs := kvalobs.InitDBs() + // Only cache from kvalobs? 
+ cache := cache.New(dbs["kvalobs"]) pool, err := pgxpool.New(context.Background(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { @@ -54,12 +46,12 @@ func (config *Config) Execute() error { } }() - if utils.IsEmptyOrEqual(config.Database, prod.Name) { - ImportDB(prod, cache, pool, config) - } + for name, db := range dbs { + if !utils.IsEmptyOrEqual(config.Database, name) { + continue + } + ImportDB(db, cache, pool, config) - if utils.IsEmptyOrEqual(config.Database, hist.Name) { - ImportDB(hist, cache, pool, config) } return nil diff --git a/migrations/kvalobs/import/text.go b/migrations/kvalobs/import/text.go deleted file mode 100644 index 75312db2..00000000 --- a/migrations/kvalobs/import/text.go +++ /dev/null @@ -1,130 +0,0 @@ -package port - -import ( - "bufio" - "log/slog" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - kvalobs "migrate/kvalobs/db" - "migrate/lard" - - "github.com/jackc/pgx/v5/pgxpool" -) - -// Returns a TextTable for import -func TextTable(path string) kvalobs.Table { - return kvalobs.Table{ - Path: filepath.Join(path, kvalobs.TEXT_TABLE_NAME), - Import: importText, - } -} - -func importText(tsid int32, label *kvalobs.Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { - file, err := os.Open(filename) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - - // Parse number of rows - scanner.Scan() - rowCount, _ := strconv.Atoi(scanner.Text()) - - // Skip header - scanner.Scan() - - if label.IsMetarCloudType() { - data, err := parseMetarCloudType(tsid, rowCount, scanner) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - count, err := lard.InsertData(data, pool, logStr) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - - return count, nil - } - - text, err := parseTextCSV(tsid, rowCount, scanner) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - - count, err := lard.InsertTextData(text, pool, logStr) - if err != nil { - slog.Error(logStr + err.Error()) - return 0, err - } - - return count, nil -} - -// Text obs are not flagged -func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { - data := make([][]any, 0, rowCount) - for scanner.Scan() { - // obstime, original, tbtime - fields := strings.Split(scanner.Text(), ",") - - obstime, err := time.Parse(time.RFC3339, fields[0]) - if err != nil { - return nil, err - } - - lardObs := lard.TextObs{ - Id: tsid, - Obstime: obstime, - Text: &fields[1], - } - - data = append(data, lardObs.ToRow()) - } - - return data, nil -} - -// Function for paramids 2751, 2752, 2753, 2754 that were stored as text data -// but should instead be treated as scalars -// TODO: I'm not sure these params should be scalars given that the other cloud types are not. -// Should all cloud types be integers? 
-func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { - data := make([][]any, 0, rowCount) - for scanner.Scan() { - // obstime, original, tbtime - fields := strings.Split(scanner.Text(), ",") - - obstime, err := time.Parse(time.RFC3339, fields[0]) - if err != nil { - return nil, err - } - - val, err := strconv.ParseFloat(fields[1], 32) - if err != nil { - return nil, err - } - - original := float32(val) - lardObs := lard.DataObs{ - Id: tsid, - Obstime: obstime, - Data: &original, - } - - data = append(data, lardObs.ToRow()) - } - - // TODO: Original text obs were not flagged, so we don't return a flags? - // Or should we return default values? - return data, nil -} diff --git a/migrations/tests/files/kvalobs/text/18700/18700_1000_316__.csv b/migrations/tests/files/kvalobs/text_data/18700/18700_1000_316__.csv similarity index 100% rename from migrations/tests/files/kvalobs/text/18700/18700_1000_316__.csv rename to migrations/tests/files/kvalobs/text_data/18700/18700_1000_316__.csv diff --git a/migrations/tests/kvalobs_test.go b/migrations/tests/kvalobs_test.go index ae923502..67911859 100644 --- a/migrations/tests/kvalobs_test.go +++ b/migrations/tests/kvalobs_test.go @@ -20,8 +20,8 @@ const LARD_STRING string = "host=localhost user=postgres dbname=postgres passwor const DUMPS_PATH string = "./files" type KvalobsTestCase struct { - db kvalobs.DB - table kvalobs.Table + db string + table string station int32 paramid int32 typeid int32 @@ -59,22 +59,20 @@ func TestImportDataKvalobs(t *testing.T) { } defer pool.Close() - prod, hist := kvalobs.InitDBs() - prod.Path = filepath.Join(DUMPS_PATH, prod.Name) - hist.Path = filepath.Join(DUMPS_PATH, hist.Name) + dbs := kvalobs.InitDBs() cases := []KvalobsTestCase{ { - db: hist, - table: port.DataTable(hist.Path), + db: "histkvalobs", + table: "data", station: 18700, paramid: 313, permit: 1, expectedRows: 39, }, { - db: prod, - table: port.TextTable(prod.Path), + db: "kvalobs", + table: "text_data", station: 18700, permit: 1, expectedRows: 182, @@ -83,7 +81,12 @@ func TestImportDataKvalobs(t *testing.T) { for _, c := range cases { config, cache := c.mockConfig() - insertedRows, err := port.ImportTable(c.table, cache, pool, config) + db := dbs[c.db] + + table := db.Tables[c.table] + table.Path = filepath.Join(DUMPS_PATH, db.Name, table.Name) + + insertedRows, err := port.ImportTable(table, cache, pool, config) switch { case err != nil: From a734882c1a3a7febb65e8791a71f851735d91822 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Mon, 9 Dec 2024 12:25:57 +0100 Subject: [PATCH 60/67] Add psql command --- justfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/justfile b/justfile index 7b969f0a..f8889de0 100644 --- a/justfile +++ b/justfile @@ -22,6 +22,10 @@ debug_test TEST: setup debug_migrations: setup -@ cd migrations && go test -v ./... +# psql into the container database +psql: + @docker exec -it lard_tests psql -U postgres + setup: @ echo "Starting Postgres docker container..." 
docker run --name lard_tests -e POSTGRES_PASSWORD=postgres -p 5432:5432 -d postgres From 0e1fa4eddfc3e3d016b893b9f2f2b2ade7105dc2 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Tue, 10 Dec 2024 17:11:08 +0100 Subject: [PATCH 61/67] Improve label dump (wip) --- db/partitions_generated.sql | 4 + migrations/kvalobs/db/label.go | 2 + migrations/kvalobs/db/label_dump_functions.go | 159 ++++++++++++++---- migrations/kvalobs/dump/dump.go | 14 +- migrations/kvalobs/import/main.go | 4 +- migrations/tests/kdvh_test.go | 2 +- migrations/utils/time.go | 18 +- 7 files changed, 162 insertions(+), 41 deletions(-) diff --git a/db/partitions_generated.sql b/db/partitions_generated.sql index 070a914d..73aebbb4 100644 --- a/db/partitions_generated.sql +++ b/db/partitions_generated.sql @@ -1,4 +1,6 @@ -- Generated by simple script for testing +CREATE TABLE IF NOT EXISTS data_y1850_to_y1950 PARTITION OF public.data +FOR VALUES FROM ('1850-01-01 00:00:00+00') TO ('1950-01-01 00:00:00+00'); CREATE TABLE IF NOT EXISTS data_y1950_to_y2000 PARTITION OF public.data FOR VALUES FROM ('1950-01-01 00:00:00+00') TO ('2000-01-01 00:00:00+00'); CREATE TABLE IF NOT EXISTS data_y2000_to_y2010 PARTITION OF public.data @@ -35,6 +37,8 @@ CREATE TABLE IF NOT EXISTS data_y2028_to_y2029 PARTITION OF public.data FOR VALUES FROM ('2028-01-01 00:00:00+00') TO ('2029-01-01 00:00:00+00'); CREATE TABLE IF NOT EXISTS data_y2029_to_y2030 PARTITION OF public.data FOR VALUES FROM ('2029-01-01 00:00:00+00') TO ('2030-01-01 00:00:00+00'); +CREATE TABLE IF NOT EXISTS nonscalar_data_y1850_to_y1950 PARTITION OF public.nonscalar_data +FOR VALUES FROM ('1850-01-01 00:00:00+00') TO ('1950-01-01 00:00:00+00'); CREATE TABLE IF NOT EXISTS nonscalar_data_y1950_to_y2000 PARTITION OF public.nonscalar_data FOR VALUES FROM ('1950-01-01 00:00:00+00') TO ('2000-01-01 00:00:00+00'); CREATE TABLE IF NOT EXISTS nonscalar_data_y2000_to_y2010 PARTITION OF public.nonscalar_data diff --git a/migrations/kvalobs/db/label.go b/migrations/kvalobs/db/label.go index 0ddedddb..0db0173c 100644 --- a/migrations/kvalobs/db/label.go +++ b/migrations/kvalobs/db/label.go @@ -92,6 +92,8 @@ func WriteLabelCSV(path string, labels []*Label) error { err = gocsv.Marshal(labels, file) if err != nil { slog.Error(err.Error()) + } else { + slog.Info(fmt.Sprintf("Dumped %d labels!", len(labels))) } return err } diff --git a/migrations/kvalobs/db/label_dump_functions.go b/migrations/kvalobs/db/label_dump_functions.go index 34827ea6..94b31a5c 100644 --- a/migrations/kvalobs/db/label_dump_functions.go +++ b/migrations/kvalobs/db/label_dump_functions.go @@ -4,6 +4,8 @@ import ( "context" "log/slog" "migrate/utils" + "slices" + "sync" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgxpool" @@ -11,53 +13,152 @@ import ( // Here are implemented the `LabelDumpFunc` stored inside the Table struct -func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { - query := `SELECT DISTINCT stationid, typeid, paramid, sensor::int, level - FROM data - WHERE ($1::timestamp IS NULL OR obstime >= $1) - AND ($2::timestamp IS NULL OR obstime < $2) - ORDER BY stationid` - - slog.Info("Querying data labels...") - rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) +const OBSERVATIONS_QUERY string = `SELECT DISTINCT stationid, typeid FROM observations +WHERE ($1::timestamp IS NULL OR obstime >= $1) + AND ($2::timestamp IS NULL OR obstime < $2) +ORDER BY stationid` + +const OBSDATA_QUERY string = `SELECT DISTINCT paramid, sensor::int, level FROM obsdata 
+JOIN observations USING(observationid) +WHERE stationid = $1 + AND typeid = $2 + AND ($3::timestamp IS NULL OR obstime >= $3) + AND ($4::timestamp IS NULL OR obstime < $4)` + +const OBSTEXTDATA_QUERY string = `SELECT DISTINCT paramid FROM obstextdata +JOIN observations USING(observationid) +WHERE stationid = $1 + AND typeid = $2 + AND ($3::timestamp IS NULL OR obstime >= $3) + AND ($4::timestamp IS NULL OR obstime < $4)` + +type StationType struct { + stationid int32 + typeid int32 +} + +func GetUniqueStationsAndTypeIds(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*StationType, error) { + rows, err := pool.Query(context.TODO(), OBSERVATIONS_QUERY, timespan.From, timespan.To) if err != nil { - slog.Error(err.Error()) return nil, err } - slog.Info("Collecting data labels...") - labels := make([]*Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[Label]) + labels := make([]*StationType, 0, rows.CommandTag().RowsAffected()) + return pgx.AppendRows(labels, rows, func(row pgx.CollectableRow) (*StationType, error) { + var label StationType + err := row.Scan(&label.stationid, &label.typeid) + return &label, err + }) +} + +func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { + // First query stationid and typeid from observations + // Then query paramid, sensor, level from obsdata + // This is faster than querying all of them together from data + + slog.Info("Querying data labels labels...") + // TODO: this should be done outside (can be reused for data and text) + stationsAndTypes, err := GetUniqueStationsAndTypeIds(timespan, pool) if err != nil { slog.Error(err.Error()) return nil, err } + bar := utils.NewBar(len(stationsAndTypes), "Data labels") + var labels []*Label + var wg sync.WaitGroup + + // TODO: can we move this somewhere else? 
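+	// Buffered channel used as a semaphore, limiting the number of
+	// concurrent label queries against the database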
+ semaphore := make(chan struct{}, 4) + + for _, s := range stationsAndTypes { + wg.Add(1) + semaphore <- struct{}{} + + go func() { + defer func() { + bar.Add(1) + wg.Done() + <-semaphore + }() + + rows, err := pool.Query(context.TODO(), OBSDATA_QUERY, s.stationid, s.typeid, timespan.From, timespan.To) + if err != nil { + slog.Error(err.Error()) + return + } + + innerLabels := make([]*Label, 0, rows.CommandTag().RowsAffected()) + innerLabels, err = pgx.AppendRows(innerLabels, rows, func(row pgx.CollectableRow) (*Label, error) { + label := Label{StationID: s.stationid, TypeID: s.typeid} + err := row.Scan(&label.ParamID, &label.Sensor, &label.Level) + return &label, err + }) + + if err != nil { + slog.Error(err.Error()) + return + } + + labels = slices.Concat(labels, innerLabels) + }() + } + + wg.Wait() + return labels, nil } func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { - // NOTE: `param` table is empty in histkvalobs - query := `SELECT DISTINCT stationid, typeid, paramid, NULL AS sensor, NULL AS level - FROM text_data - WHERE ($1::timestamp IS NULL OR obstime >= $1) - AND ($2::timestamp IS NULL OR obstime < $2) - ORDER BY stationid` - - slog.Info("Querying text labels...") - rows, err := pool.Query(context.TODO(), query, timespan.From, timespan.To) - if err != nil { - slog.Error(err.Error()) - return nil, err - } + // First query stationid and typeid from observations + // Then query paramid, sensor, level from obsdata + // This is faster than querying all of them together from data - slog.Info("Collecting text labels...") - labels := make([]*Label, 0, rows.CommandTag().RowsAffected()) - labels, err = pgx.AppendRows(labels, rows, pgx.RowToAddrOfStructByName[Label]) + slog.Info("Querying data labels labels...") + // TODO: this should be done outside (can be reused for data and text) + stationsAndTypes, err := GetUniqueStationsAndTypeIds(timespan, pool) if err != nil { slog.Error(err.Error()) return nil, err } + bar := utils.NewBar(len(stationsAndTypes), "Text labels") + var labels []*Label + var wg sync.WaitGroup + + // TODO: can we move this somewhere else? 
+ semaphore := make(chan struct{}, 4) + for _, s := range stationsAndTypes { + wg.Add(1) + semaphore <- struct{}{} + + go func() { + defer func() { + bar.Add(1) + wg.Done() + <-semaphore + }() + + rows, err := pool.Query(context.TODO(), OBSTEXTDATA_QUERY, s.stationid, s.typeid, timespan.From, timespan.To) + if err != nil { + slog.Error(err.Error()) + return + } + + innerLabels := make([]*Label, 0, rows.CommandTag().RowsAffected()) + innerLabels, err = pgx.AppendRows(innerLabels, rows, func(row pgx.CollectableRow) (*Label, error) { + label := Label{StationID: s.stationid, TypeID: s.typeid} + err := row.Scan(&label.ParamID) + return &label, err + }) + + if err != nil { + slog.Error(err.Error()) + return + } + labels = slices.Concat(labels, innerLabels) + }() + } + wg.Wait() return labels, nil } diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index d1084573..924dcd3c 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -15,11 +15,11 @@ import ( "migrate/utils" ) -func getLabels(table *kvalobs.Table, pool *pgxpool.Pool, config *Config) (labels []*kvalobs.Label, err error) { - labelFile := table.Path + "_labels.csv" +func getLabels(table *kvalobs.Table, pool *pgxpool.Pool, timespan *utils.TimeSpan, updateLabels bool) (labels []*kvalobs.Label, err error) { + labelFile := fmt.Sprintf("%s_labels_%s.csv", table.Path, timespan.ToString()) - if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { - labels, err = table.DumpLabels(config.TimeSpan(), pool) + if _, err := os.Stat(labelFile); err != nil || updateLabels { + labels, err = table.DumpLabels(timespan, pool) if err != nil { return nil, err } @@ -45,13 +45,13 @@ func dumpTable(table *kvalobs.Table, pool *pgxpool.Pool, config *Config) { fmt.Printf("Dumping to %q...\n", table.Path) defer fmt.Println(strings.Repeat("- ", 40)) - labels, err := getLabels(table, pool, config) + timespan := config.TimeSpan() + labels, err := getLabels(table, pool, timespan, config.UpdateLabels) if err != nil || config.LabelsOnly { return } stationMap := getStationLabelMap(labels) - timespan := config.TimeSpan() // Used to limit connections to the database semaphore := make(chan struct{}, config.MaxConn) @@ -117,6 +117,8 @@ func dumpDB(database kvalobs.DB, config *Config) { return } + // TODO: dump labels first for both tables and then pass them to dumpTable + // or have only choices of dumping labels for name, table := range database.Tables { if !utils.IsEmptyOrEqual(config.Table, name) { continue diff --git a/migrations/kvalobs/import/main.go b/migrations/kvalobs/import/main.go index 85d9100d..8b0ea3c3 100644 --- a/migrations/kvalobs/import/main.go +++ b/migrations/kvalobs/import/main.go @@ -21,8 +21,8 @@ type Config struct { func (config *Config) Execute() error { dbs := kvalobs.InitDBs() - // Only cache from kvalobs? - cache := cache.New(dbs["kvalobs"]) + // Only cache from histkvalobs? 
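+	// cache.New loads the Stinfosys permits and the Kvalobs timeseries
+	// timespans from the given database (assumed here to cover both imports)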
+ cache := cache.New(dbs["histkvalobs"]) pool, err := pgxpool.New(context.Background(), os.Getenv(lard.LARD_ENV_VAR)) if err != nil { diff --git a/migrations/tests/kdvh_test.go b/migrations/tests/kdvh_test.go index 6e1e183f..98366021 100644 --- a/migrations/tests/kdvh_test.go +++ b/migrations/tests/kdvh_test.go @@ -63,7 +63,7 @@ func TestImportKDVH(t *testing.T) { kdvh := db.Init() - // TODO: test does not fail, if flags are not inserted + // TODO: test does not fail if flags are not inserted // TODO: bar does not work well with log print outs for _, c := range testCases { config, cache := c.mockConfig() diff --git a/migrations/utils/time.go b/migrations/utils/time.go index 7efa82e0..caa674d1 100644 --- a/migrations/utils/time.go +++ b/migrations/utils/time.go @@ -18,9 +18,9 @@ func (ts *Timestamp) UnmarshalText(b []byte) error { return nil } -func (ts *Timestamp) Format(layout string) string { - return ts.t.Format(layout) -} +// func (ts *Timestamp) Format(layout string) string { +// return ts.t.Format(layout) +// } func (ts *Timestamp) Inner() *time.Time { if ts == nil { @@ -34,3 +34,15 @@ type TimeSpan struct { From *time.Time To *time.Time } + +func (t *TimeSpan) ToString() string { + from := "from" + to := "to" + if t.From != nil { + from += t.From.Format(time.DateOnly) + } + if t.To != nil { + to += t.To.Format(time.DateOnly) + } + return from + "_" + to +} From 1445cbf0b01a2500a732cf3d21c4e33d3eab077a Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 11 Dec 2024 12:34:27 +0100 Subject: [PATCH 62/67] Improve label dumps (final) --- migrations/kvalobs/db/label_dump_functions.go | 67 ++++++++++--------- migrations/kvalobs/db/table.go | 2 +- migrations/kvalobs/dump/dump.go | 10 ++- migrations/kvalobs/import/import.go | 1 + 4 files changed, 41 insertions(+), 39 deletions(-) diff --git a/migrations/kvalobs/db/label_dump_functions.go b/migrations/kvalobs/db/label_dump_functions.go index 94b31a5c..c5edba9c 100644 --- a/migrations/kvalobs/db/label_dump_functions.go +++ b/migrations/kvalobs/db/label_dump_functions.go @@ -13,11 +13,6 @@ import ( // Here are implemented the `LabelDumpFunc` stored inside the Table struct -const OBSERVATIONS_QUERY string = `SELECT DISTINCT stationid, typeid FROM observations -WHERE ($1::timestamp IS NULL OR obstime >= $1) - AND ($2::timestamp IS NULL OR obstime < $2) -ORDER BY stationid` - const OBSDATA_QUERY string = `SELECT DISTINCT paramid, sensor::int, level FROM obsdata JOIN observations USING(observationid) WHERE stationid = $1 @@ -37,41 +32,53 @@ type StationType struct { typeid int32 } -func GetUniqueStationsAndTypeIds(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*StationType, error) { - rows, err := pool.Query(context.TODO(), OBSERVATIONS_QUERY, timespan.From, timespan.To) +// Lazily initialized slice of distinct stationids and typeids from the `observations` table +var UNIQUE_STATIONS_TYPES []*StationType = nil + +func initUniqueStationsAndTypeIds(timespan *utils.TimeSpan, pool *pgxpool.Pool) error { + if UNIQUE_STATIONS_TYPES != nil { + return nil + } + + rows, err := pool.Query(context.TODO(), + `SELECT DISTINCT stationid, typeid FROM observations + WHERE ($1::timestamp IS NULL OR obstime >= $1) + AND ($2::timestamp IS NULL OR obstime < $2) + ORDER BY stationid`, + timespan.From, timespan.To) if err != nil { - return nil, err + return err } - labels := make([]*StationType, 0, rows.CommandTag().RowsAffected()) - return pgx.AppendRows(labels, rows, func(row pgx.CollectableRow) (*StationType, error) { + UNIQUE_STATIONS_TYPES = 
make([]*StationType, 0, rows.CommandTag().RowsAffected()) + UNIQUE_STATIONS_TYPES, err = pgx.AppendRows(UNIQUE_STATIONS_TYPES, rows, func(row pgx.CollectableRow) (*StationType, error) { var label StationType err := row.Scan(&label.stationid, &label.typeid) return &label, err }) + + if err != nil { + return err + } + return nil } -func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { +func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool, maxConn int) ([]*Label, error) { // First query stationid and typeid from observations // Then query paramid, sensor, level from obsdata // This is faster than querying all of them together from data - - slog.Info("Querying data labels labels...") - // TODO: this should be done outside (can be reused for data and text) - stationsAndTypes, err := GetUniqueStationsAndTypeIds(timespan, pool) - if err != nil { + slog.Info("Querying data labels...") + if err := initUniqueStationsAndTypeIds(timespan, pool); err != nil { slog.Error(err.Error()) return nil, err } - bar := utils.NewBar(len(stationsAndTypes), "Data labels") + bar := utils.NewBar(len(UNIQUE_STATIONS_TYPES), "Stations") var labels []*Label var wg sync.WaitGroup - // TODO: can we move this somewhere else? - semaphore := make(chan struct{}, 4) - - for _, s := range stationsAndTypes { + semaphore := make(chan struct{}, maxConn) + for _, s := range UNIQUE_STATIONS_TYPES { wg.Add(1) semaphore <- struct{}{} @@ -109,26 +116,22 @@ func dumpDataLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, err return labels, nil } -func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) { +func dumpTextLabels(timespan *utils.TimeSpan, pool *pgxpool.Pool, maxConn int) ([]*Label, error) { // First query stationid and typeid from observations - // Then query paramid, sensor, level from obsdata + // Then query paramid from obstextdata // This is faster than querying all of them together from data - - slog.Info("Querying data labels labels...") - // TODO: this should be done outside (can be reused for data and text) - stationsAndTypes, err := GetUniqueStationsAndTypeIds(timespan, pool) - if err != nil { + slog.Info("Querying text labels...") + if err := initUniqueStationsAndTypeIds(timespan, pool); err != nil { slog.Error(err.Error()) return nil, err } - bar := utils.NewBar(len(stationsAndTypes), "Text labels") + bar := utils.NewBar(len(UNIQUE_STATIONS_TYPES), "Stations") var labels []*Label var wg sync.WaitGroup - // TODO: can we move this somewhere else? 
- semaphore := make(chan struct{}, 4) - for _, s := range stationsAndTypes { + semaphore := make(chan struct{}, maxConn) + for _, s := range UNIQUE_STATIONS_TYPES { wg.Add(1) semaphore <- struct{}{} diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go index a9552e31..3c76d3dd 100644 --- a/migrations/kvalobs/db/table.go +++ b/migrations/kvalobs/db/table.go @@ -16,7 +16,7 @@ type Table struct { } // Function used to query labels from kvalobs given an optional timespan -type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool) ([]*Label, error) +type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool, maxConn int) ([]*Label, error) // Function used to query timeseries from kvalobs for a specific label and dump them inside path type ObsDumpFunc func(label *Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error diff --git a/migrations/kvalobs/dump/dump.go b/migrations/kvalobs/dump/dump.go index 924dcd3c..6a53a995 100644 --- a/migrations/kvalobs/dump/dump.go +++ b/migrations/kvalobs/dump/dump.go @@ -15,11 +15,11 @@ import ( "migrate/utils" ) -func getLabels(table *kvalobs.Table, pool *pgxpool.Pool, timespan *utils.TimeSpan, updateLabels bool) (labels []*kvalobs.Label, err error) { +func getLabels(table *kvalobs.Table, pool *pgxpool.Pool, timespan *utils.TimeSpan, config *Config) (labels []*kvalobs.Label, err error) { labelFile := fmt.Sprintf("%s_labels_%s.csv", table.Path, timespan.ToString()) - if _, err := os.Stat(labelFile); err != nil || updateLabels { - labels, err = table.DumpLabels(timespan, pool) + if _, err := os.Stat(labelFile); err != nil || config.UpdateLabels { + labels, err = table.DumpLabels(timespan, pool, config.MaxConn) if err != nil { return nil, err } @@ -46,7 +46,7 @@ func dumpTable(table *kvalobs.Table, pool *pgxpool.Pool, config *Config) { defer fmt.Println(strings.Repeat("- ", 40)) timespan := config.TimeSpan() - labels, err := getLabels(table, pool, timespan, config.UpdateLabels) + labels, err := getLabels(table, pool, timespan, config) if err != nil || config.LabelsOnly { return } @@ -117,8 +117,6 @@ func dumpDB(database kvalobs.DB, config *Config) { return } - // TODO: dump labels first for both tables and then pass them to dumpTable - // or have only choices of dumping labels for name, table := range database.Tables { if !utils.IsEmptyOrEqual(config.Table, name) { continue diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 701a9da4..853950ce 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -27,6 +27,7 @@ func ImportTable(table *kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, c return 0, err } + fmt.Printf("Number of stations to dump: %d...\n", len(stations)) var rowsInserted int64 for _, station := range stations { stnr, err := strconv.ParseInt(station.Name(), 10, 32) From 19a35b273f2d0e4b3211f4bfe3a1bd5da01b5555 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 11 Dec 2024 12:36:59 +0100 Subject: [PATCH 63/67] Remove unnecessary comments --- migrations/kvalobs/db/csv_parsers.go | 2 -- migrations/kvalobs/db/import_functions.go | 2 -- migrations/kvalobs/db/label_dump_functions.go | 2 -- migrations/kvalobs/db/series_dump_functions.go | 2 -- 4 files changed, 8 deletions(-) diff --git a/migrations/kvalobs/db/csv_parsers.go b/migrations/kvalobs/db/csv_parsers.go index bf2f774d..896ce725 100644 --- a/migrations/kvalobs/db/csv_parsers.go +++ b/migrations/kvalobs/db/csv_parsers.go @@ -9,8 +9,6 @@ import ( "time" 
) -// Here we define how to parse dumped CSV depending on the table - func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [][]any, error) { data := make([][]any, 0, rowCount) flags := make([][]any, 0, rowCount) diff --git a/migrations/kvalobs/db/import_functions.go b/migrations/kvalobs/db/import_functions.go index 09f28509..0ae183de 100644 --- a/migrations/kvalobs/db/import_functions.go +++ b/migrations/kvalobs/db/import_functions.go @@ -10,8 +10,6 @@ import ( "github.com/jackc/pgx/v5/pgxpool" ) -// Here are implemented the `ImportFunc` stored inside the Table struct - // NOTE: // - for both kvalobs and histkvalobs: // - all stinfo non-scalar params that can be found in Kvalobs are stored in `text_data` diff --git a/migrations/kvalobs/db/label_dump_functions.go b/migrations/kvalobs/db/label_dump_functions.go index c5edba9c..22e18dd2 100644 --- a/migrations/kvalobs/db/label_dump_functions.go +++ b/migrations/kvalobs/db/label_dump_functions.go @@ -11,8 +11,6 @@ import ( "github.com/jackc/pgx/v5/pgxpool" ) -// Here are implemented the `LabelDumpFunc` stored inside the Table struct - const OBSDATA_QUERY string = `SELECT DISTINCT paramid, sensor::int, level FROM obsdata JOIN observations USING(observationid) WHERE stationid = $1 diff --git a/migrations/kvalobs/db/series_dump_functions.go b/migrations/kvalobs/db/series_dump_functions.go index 6f886ba3..538db49d 100644 --- a/migrations/kvalobs/db/series_dump_functions.go +++ b/migrations/kvalobs/db/series_dump_functions.go @@ -13,8 +13,6 @@ import ( "github.com/jackc/pgx/v5/pgxpool" ) -// Here are implemented the `ObsDumpFunc` stored inside the Table struct - func dumpDataSeries(label *Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error { // NOTE: sensor and level could be NULL, but in reality they have default values query := `SELECT obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed From 0ac0890c00b139a3c09526265eb3f62b9e81a7e5 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 11 Dec 2024 13:32:21 +0100 Subject: [PATCH 64/67] Update check command --- migrations/kvalobs/check/main.go | 80 +++++++---------------------- migrations/stinfosys/non_scalars.go | 26 +--------- 2 files changed, 20 insertions(+), 86 deletions(-) diff --git a/migrations/kvalobs/check/main.go b/migrations/kvalobs/check/main.go index 0370e837..244ded91 100644 --- a/migrations/kvalobs/check/main.go +++ b/migrations/kvalobs/check/main.go @@ -1,55 +1,42 @@ package check import ( - "context" + "errors" "fmt" "log" - "os" - "path/filepath" "slices" "strings" - "time" - - "github.com/jackc/pgx/v5" "migrate/kvalobs/db" "migrate/stinfosys" - "migrate/utils" ) type Config struct { - Path string `arg:"-p" default:"./dumps" help:"Directory of the dumped data"` - CheckName string `arg:"positional" required:"true" help:"Choices: ['overlap', 'non-scalars']"` + DataFilename string `arg:"positional" required:"true" help:"data label file"` + TextFilename string `arg:"positional" required:"true" help:"text label file"` } func (c *Config) Execute() { - dbs := db.InitDBs() - if utils.IsEmptyOrEqual(c.CheckName, "overlap") { - fmt.Println("Checking if some param IDs are stored in both the `data` and `text_data` tables") - for _, db := range dbs { - c.checkDataAndTextParamsOverlap(&db) - } - } - if utils.IsEmptyOrEqual(c.CheckName, "non-scalars") { - fmt.Println("Checking if param IDs in `text_data` match non-scalar parameters in Stinfosys") - stinfoParams := getStinfoNonScalars() - for _, db := range dbs { - 
c.checkNonScalars(&db, stinfoParams) - } + dataParamids, derr := loadParamids(c.DataFilename) + textParamids, terr := loadParamids(c.TextFilename) + if derr != nil || terr != nil { + fmt.Println(errors.Join(derr, terr)) + return } + + fmt.Println("Checking if some param IDs are stored in both the `data` and `text_data` tables") + c.checkDataAndTextParamsOverlap(dataParamids, textParamids) + + fmt.Println("Checking if param IDs in `text_data` match non-scalar parameters in Stinfosys") + conn, ctx := stinfosys.Connect() + defer conn.Close(ctx) + stinfoParams := stinfosys.GetNonScalars(conn) + c.checkNonScalars(dataParamids, textParamids, stinfoParams) } // Simply checks if some params are found both in the data and text_data -func (c *Config) checkDataAndTextParamsOverlap(database *db.DB) { +func (c *Config) checkDataAndTextParamsOverlap(dataParamids, textParamids map[int32]int32) { defer fmt.Println(strings.Repeat("- ", 40)) - datapath := filepath.Join(c.Path, database.Name, db.DATA_TABLE_NAME+"_labels.csv") - textpath := filepath.Join(c.Path, database.Name, db.TEXT_TABLE_NAME+"_labels.csv") - - dataParamids, derr := loadParamids(datapath) - textParamids, terr := loadParamids(textpath) - if derr != nil || terr != nil { - return - } ids := make([]int32, 0, len(textParamids)) for id := range dataParamids { @@ -89,38 +76,9 @@ type StinfoPair struct { IsScalar bool `db:"scalar"` } -func getStinfoNonScalars() []int32 { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - conn, err := pgx.Connect(ctx, os.Getenv(stinfosys.STINFO_ENV_VAR)) - if err != nil { - log.Fatal("Could not connect to Stinfosys. Make sure to be connected to the VPN. " + err.Error()) - } - defer conn.Close(ctx) - - rows, err := conn.Query(context.TODO(), "SELECT paramid FROM param WHERE scalar = false ORDER BY paramid") - if err != nil { - log.Fatal(err) - } - nonscalars, err := pgx.CollectRows(rows, pgx.RowTo[int32]) - if err != nil { - log.Fatal(err) - } - return nonscalars -} - // Checks that text params in Kvalobs are considered non-scalar in Stinfosys -func (c *Config) checkNonScalars(database *db.DB, nonscalars []int32) { +func (c *Config) checkNonScalars(dataParamids, textParamids map[int32]int32, nonscalars []int32) { defer fmt.Println(strings.Repeat("- ", 40)) - datapath := filepath.Join(c.Path, database.Name, db.DATA_TABLE_NAME+"_labels.csv") - textpath := filepath.Join(c.Path, database.Name, db.TEXT_TABLE_NAME+"_labels.csv") - - dataParamids, derr := loadParamids(datapath) - textParamids, terr := loadParamids(textpath) - if derr != nil || terr != nil { - return - } for _, id := range nonscalars { if _, ok := textParamids[id]; ok { diff --git a/migrations/stinfosys/non_scalars.go b/migrations/stinfosys/non_scalars.go index def5e59a..3439311e 100644 --- a/migrations/stinfosys/non_scalars.go +++ b/migrations/stinfosys/non_scalars.go @@ -7,7 +7,7 @@ import ( "github.com/jackc/pgx/v5" ) -func getNonScalars(conn *pgx.Conn) []int32 { +func GetNonScalars(conn *pgx.Conn) []int32 { rows, err := conn.Query(context.TODO(), "SELECT paramid FROM param WHERE scalar = false ORDER BY paramid") if err != nil { log.Fatal(err) @@ -18,27 +18,3 @@ func getNonScalars(conn *pgx.Conn) []int32 { } return nonscalars } - -// Tells if a paramid is scalar or not -type ScalarMap = map[int32]bool - -func GetParamScalarMap(conn *pgx.Conn) ScalarMap { - cache := make(ScalarMap) - - rows, err := conn.Query(context.TODO(), "SELECT paramid, scalar FROM param") - if err != nil { - log.Fatal(err) - } - - for 
rows.Next() { - var paramid int32 - var isScalar bool - if err := rows.Scan(¶mid, &isScalar); err != nil { - log.Fatal(err) - } - cache[paramid] = isScalar - } - - return cache - -} From 1acb368511e8735e2b103ac47eaeda684a3c7b67 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Wed, 11 Dec 2024 13:32:30 +0100 Subject: [PATCH 65/67] Update comments --- migrations/README.md | 6 ++++-- migrations/kvalobs/db/csv_parsers.go | 2 +- migrations/kvalobs/db/main.go | 13 ++++--------- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/migrations/README.md b/migrations/README.md index bfa03aa5..92a845e8 100644 --- a/migrations/README.md +++ b/migrations/README.md @@ -1,6 +1,6 @@ # Migrations -Go package used to dump tables from old databases (KDVH, Kvalobs) and import them into LARD. +Go package that dumps tables from old databases (KDVH, Kvalobs) and imports them into LARD. ## Usage @@ -10,16 +10,18 @@ Go package used to dump tables from old databases (KDVH, Kvalobs) and import the go build ``` -1. Dump tables from KDVH +1. Dump tables ```terminal ./migrate kdvh dump + ./migrate kvalobs dump ``` 1. Import dumps into LARD ```terminal ./migrate kdvh import + ./migrate kvalobs import ``` For each command, you can use the `--help` flag to see all available options. diff --git a/migrations/kvalobs/db/csv_parsers.go b/migrations/kvalobs/db/csv_parsers.go index 896ce725..1c897a80 100644 --- a/migrations/kvalobs/db/csv_parsers.go +++ b/migrations/kvalobs/db/csv_parsers.go @@ -100,7 +100,7 @@ func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, er // Function for paramids 2751, 2752, 2753, 2754 that were stored as text data // but should instead be treated as scalars // TODO: I'm not sure these params should be scalars given that the other cloud types are not. -// Should all cloud types be integers? +// Should all cloud types be integers or text? func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { data := make([][]any, 0, rowCount) for scanner.Scan() { diff --git a/migrations/kvalobs/db/main.go b/migrations/kvalobs/db/main.go index 2383245c..fcf24b00 100644 --- a/migrations/kvalobs/db/main.go +++ b/migrations/kvalobs/db/main.go @@ -28,7 +28,7 @@ import ( // useinfo | character(16) | | | '0000000000000000'::bpchar // cfailed | text | | | // -// - `data_history`: stores observations similar to `data`, but not sure what history refers to +// - `data_history`: stores the history of QC pipelines for data observations // // - `default_missing`: // - `default_missing_values`: default values for some paramids (-32767) @@ -91,21 +91,16 @@ import ( // tbtime | timestamp without time zone | | not null | // typeid | integer | | not null | // -// - `text_data_history`: stores observations similar to `text_data`, but not sure what history refers to -// -// NOTE: In `histkvalobs` only `data`, `data_history`, `text_data`, and `text_data_history` are non-empty. 
+// - `text_data_history`: stores the history of QC pipelines for text observations () // // IMPORTANT: considerations for migrations to LARD // - LARD stores Timeseries labels (stationid, paramid, typeid, sensor, level) in a separate table -// - (sensor, level) can be NULL, while in Kvalobs they have default values (0,0) +// - In LARD (sensor, level) can both be NULL, while in Kvalobs they have default values ('0',0) // => POSSIBLE INCONSISTENCY when importing to LARD -// - Timestamps are UTC +// - Timestamps in Kvalobs are UTC // - Kvalobs doesn't have the concept of timeseries ID, // instead there is a sequential ID associated with each observation row -const DATA_TABLE_NAME string = "data" -const TEXT_TABLE_NAME string = "text" // text_data - // Special values that are treated as NULL in Kvalobs // TODO: are there more values we should be looking for? var NULL_VALUES []float32 = []float32{-32767, -32766} From 3509faf3092436ac4d4b85bd3ae24e40bf9dea6b Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 13 Dec 2024 12:10:06 +0100 Subject: [PATCH 66/67] Insert partitions for tests --- db/flags.sql | 2 +- integration_tests/src/main.rs | 25 ++++++------------------- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/db/flags.sql b/db/flags.sql index b54bc595..4f2ef406 100644 --- a/db/flags.sql +++ b/db/flags.sql @@ -11,4 +11,4 @@ CREATE TABLE IF NOT EXISTS flags.kvdata ( CONSTRAINT unique_kvdata_timeseries_obstime UNIQUE (timeseries, obstime) ); CREATE INDEX IF NOT EXISTS kvdata_obstime_index ON flags.kvdata (obstime); -CREATE INDEX IF NOT EXISTS kvdata_timeseries_index ON flags.kvdata USING HASH (timeseries); +CREATE INDEX IF NOT EXISTS kvdata_timeseries_index ON flags.kvdata USING HASH (timeseries); diff --git a/integration_tests/src/main.rs b/integration_tests/src/main.rs index a73a1241..36eece8e 100644 --- a/integration_tests/src/main.rs +++ b/integration_tests/src/main.rs @@ -9,22 +9,6 @@ async fn insert_schema(client: &tokio_postgres::Client, filename: &str) -> Resul client.batch_execute(schema.as_str()).await } -fn format_partition(start: &str, end: &str, table: &str) -> String { - // TODO: add multiple partitions? 
- format!( - "CREATE TABLE {table}_y{start}_to_y{end} PARTITION OF {table} \ - FOR VALUES FROM ('{start}-01-01 00:00:00+00') TO ('{end}-01-01 00:00:00+00')", - ) -} - -async fn create_data_partitions(client: &tokio_postgres::Client) -> Result<(), Error> { - let scalar_string = format_partition("1950", "2100", "public.data"); - let nonscalar_string = format_partition("1950", "2100", "public.nonscalar_data"); - - client.batch_execute(scalar_string.as_str()).await?; - client.batch_execute(nonscalar_string.as_str()).await -} - #[tokio::main] async fn main() { let (client, connection) = tokio_postgres::connect(CONNECT_STRING, NoTls) @@ -38,10 +22,13 @@ async fn main() { }); // NOTE: order matters - let schemas = ["db/public.sql", "db/labels.sql", "db/flags.sql"]; + let schemas = [ + "db/public.sql", + "db/partitions_generated.sql", + "db/labels.sql", + "db/flags.sql", + ]; for schema in schemas { insert_schema(&client, schema).await.unwrap(); } - - create_data_partitions(&client).await.unwrap(); } From 046151bd048c22f202e94469d7dd742e24852807 Mon Sep 17 00:00:00 2001 From: Manuel Carrer Date: Fri, 13 Dec 2024 12:10:23 +0100 Subject: [PATCH 67/67] Allow specifying time range for import --- migrations/kvalobs/db/csv_parsers.go | 37 ++++++++++++++++++++--- migrations/kvalobs/db/import_functions.go | 13 ++++---- migrations/kvalobs/db/table.go | 2 +- migrations/kvalobs/import/import.go | 11 ++++--- 4 files changed, 48 insertions(+), 15 deletions(-) diff --git a/migrations/kvalobs/db/csv_parsers.go b/migrations/kvalobs/db/csv_parsers.go index 1c897a80..ada02d32 100644 --- a/migrations/kvalobs/db/csv_parsers.go +++ b/migrations/kvalobs/db/csv_parsers.go @@ -3,13 +3,14 @@ package db import ( "bufio" "migrate/lard" + "migrate/utils" "slices" "strconv" "strings" "time" ) -func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [][]any, error) { +func parseDataCSV(tsid int32, rowCount int, timespan *utils.TimeSpan, scanner *bufio.Scanner) ([][]any, [][]any, error) { data := make([][]any, 0, rowCount) flags := make([][]any, 0, rowCount) var originalPtr, correctedPtr *float32 @@ -23,6 +24,13 @@ func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [] return nil, nil, err } + if timespan.From != nil && obstime.Sub(*timespan.From) < 0 { + continue + } + if timespan.To != nil && obstime.Sub(*timespan.To) > 0 { + break + } + obsvalue64, err := strconv.ParseFloat(fields[1], 32) if err != nil { return nil, nil, err @@ -74,7 +82,7 @@ func parseDataCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, [] } // Text obs are not flagged -func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { +func parseTextCSV(tsid int32, rowCount int, timespan *utils.TimeSpan, scanner *bufio.Scanner) ([][]any, error) { data := make([][]any, 0, rowCount) for scanner.Scan() { // obstime, original, tbtime @@ -85,6 +93,13 @@ func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, er return nil, err } + if timespan.From != nil && obstime.Sub(*timespan.From) < 0 { + continue + } + if timespan.To != nil && obstime.Sub(*timespan.To) > 0 { + break + } + lardObs := lard.TextObs{ Id: tsid, Obstime: obstime, @@ -101,7 +116,7 @@ func parseTextCSV(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, er // but should instead be treated as scalars // TODO: I'm not sure these params should be scalars given that the other cloud types are not. // Should all cloud types be integers or text? 
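// The From/To checks added to parseDataCSV and parseTextCSV above (and to the
// cloud-type parsers below) implement a simple row filter: rows before
// timespan.From are skipped, and the scan stops at the first row after
// timespan.To, which relies on the dumped CSV being ordered by obstime.
// The standalone sketch below restates that rule with a hypothetical keep
// helper; it is not part of this package.
package main

import (
	"fmt"
	"time"
)

// span mirrors utils.TimeSpan: a nil bound means "unbounded".
type span struct {
	From *time.Time
	To   *time.Time
}

// keep reports whether a row should be kept and whether scanning should stop.
func keep(obstime time.Time, ts span) (ok, stop bool) {
	if ts.From != nil && obstime.Before(*ts.From) {
		return false, false // before the requested span: skip the row
	}
	if ts.To != nil && obstime.After(*ts.To) {
		return false, true // past the requested span: stop reading
	}
	return true, false
}

func main() {
	from := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	to := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
	ts := span{From: &from, To: &to}

	for _, t := range []time.Time{
		from.AddDate(-1, 0, 0), // skipped
		from,                   // kept (From is inclusive)
		to,                     // kept (To is inclusive here, unlike the `obstime < $2` label queries)
		to.AddDate(0, 0, 1),    // stops the scan
	} {
		ok, stop := keep(t, ts)
		fmt.Println(t.Format(time.DateOnly), ok, stop)
	}
}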
-func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { +func parseMetarCloudType(tsid int32, rowCount int, timespan *utils.TimeSpan, scanner *bufio.Scanner) ([][]any, error) { data := make([][]any, 0, rowCount) for scanner.Scan() { // obstime, original, tbtime @@ -112,6 +127,13 @@ func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][] return nil, err } + if timespan.From != nil && obstime.Sub(*timespan.From) < 0 { + continue + } + if timespan.To != nil && obstime.Sub(*timespan.To) > 0 { + break + } + val, err := strconv.ParseFloat(fields[1], 32) if err != nil { return nil, err @@ -134,7 +156,7 @@ func parseMetarCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][] // Function for paramids 305, 306, 307, 308 that were stored as scalar data // but should be treated as text -func parseSpecialCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([][]any, error) { +func parseSpecialCloudType(tsid int32, rowCount int, timespan *utils.TimeSpan, scanner *bufio.Scanner) ([][]any, error) { data := make([][]any, 0, rowCount) for scanner.Scan() { // obstime, original, tbtime, corrected, controlinfo, useinfo, cfailed @@ -146,6 +168,13 @@ func parseSpecialCloudType(tsid int32, rowCount int, scanner *bufio.Scanner) ([] return nil, err } + if timespan.From != nil && obstime.Sub(*timespan.From) < 0 { + continue + } + if timespan.To != nil && obstime.Sub(*timespan.To) > 0 { + break + } + lardObs := lard.TextObs{ Id: tsid, Obstime: obstime, diff --git a/migrations/kvalobs/db/import_functions.go b/migrations/kvalobs/db/import_functions.go index 0ae183de..5b5a8327 100644 --- a/migrations/kvalobs/db/import_functions.go +++ b/migrations/kvalobs/db/import_functions.go @@ -4,6 +4,7 @@ import ( "bufio" "log/slog" "migrate/lard" + "migrate/utils" "os" "strconv" @@ -18,7 +19,7 @@ import ( // - only for histkvalobs // - 2751, 2752, 2753, 2754 are in `text_data` but should be treated as `data`? 
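// importData and importText below dispatch on label.IsSpecialCloudType and
// label.IsMetarCloudType to route the special parameters described above:
// METAR cloud types (paramids 2751-2754) arrive as text but are imported as
// scalar data, while the special cloud types (paramids 305-308) arrive as
// scalar data but are imported as text. Those Label methods are not shown in
// this patch; the standalone sketch below is only a guess at their shape,
// using a stand-in struct with the ParamID field the package's Label also carries.
package main

import (
	"fmt"
	"slices"
)

// label is a stand-in for the package's Label type; only ParamID matters here.
type label struct{ ParamID int32 }

// Stored in `text_data` in Kvalobs but imported as scalar data.
func (l label) isMetarCloudType() bool {
	return slices.Contains([]int32{2751, 2752, 2753, 2754}, l.ParamID)
}

// Stored in `data` in Kvalobs but imported as text.
func (l label) isSpecialCloudType() bool {
	return slices.Contains([]int32{305, 306, 307, 308}, l.ParamID)
}

func main() {
	for _, l := range []label{{ParamID: 2751}, {ParamID: 305}, {ParamID: 211}} {
		fmt.Println(l.ParamID, l.isMetarCloudType(), l.isSpecialCloudType())
	}
}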
-func importData(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { +func importData(tsid int32, label *Label, filename, logStr string, timespan *utils.TimeSpan, pool *pgxpool.Pool) (int64, error) { file, err := os.Open(filename) if err != nil { slog.Error(logStr + err.Error()) @@ -36,7 +37,7 @@ func importData(tsid int32, label *Label, filename, logStr string, pool *pgxpool scanner.Scan() if label.IsSpecialCloudType() { - text, err := parseSpecialCloudType(tsid, rowCount, scanner) + text, err := parseSpecialCloudType(tsid, rowCount, timespan, scanner) if err != nil { slog.Error(logStr + err.Error()) return 0, err @@ -51,7 +52,7 @@ func importData(tsid int32, label *Label, filename, logStr string, pool *pgxpool return count, nil } - data, flags, err := parseDataCSV(tsid, rowCount, scanner) + data, flags, err := parseDataCSV(tsid, rowCount, timespan, scanner) count, err := lard.InsertData(data, pool, logStr) if err != nil { slog.Error(logStr + err.Error()) @@ -66,7 +67,7 @@ func importData(tsid int32, label *Label, filename, logStr string, pool *pgxpool return count, nil } -func importText(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) { +func importText(tsid int32, label *Label, filename, logStr string, timespan *utils.TimeSpan, pool *pgxpool.Pool) (int64, error) { file, err := os.Open(filename) if err != nil { slog.Error(logStr + err.Error()) @@ -84,7 +85,7 @@ func importText(tsid int32, label *Label, filename, logStr string, pool *pgxpool scanner.Scan() if label.IsMetarCloudType() { - data, err := parseMetarCloudType(tsid, rowCount, scanner) + data, err := parseMetarCloudType(tsid, rowCount, timespan, scanner) if err != nil { slog.Error(logStr + err.Error()) return 0, err @@ -98,7 +99,7 @@ func importText(tsid int32, label *Label, filename, logStr string, pool *pgxpool return count, nil } - text, err := parseTextCSV(tsid, rowCount, scanner) + text, err := parseTextCSV(tsid, rowCount, timespan, scanner) if err != nil { slog.Error(logStr + err.Error()) return 0, err diff --git a/migrations/kvalobs/db/table.go b/migrations/kvalobs/db/table.go index 3c76d3dd..942552bb 100644 --- a/migrations/kvalobs/db/table.go +++ b/migrations/kvalobs/db/table.go @@ -22,4 +22,4 @@ type LabelDumpFunc func(timespan *utils.TimeSpan, pool *pgxpool.Pool, maxConn in type ObsDumpFunc func(label *Label, timespan *utils.TimeSpan, path string, pool *pgxpool.Pool) error // Lard Import function -type ImportFunc func(tsid int32, label *Label, filename, logStr string, pool *pgxpool.Pool) (int64, error) +type ImportFunc func(tsid int32, label *Label, filename, logStr string, timespan *utils.TimeSpan, pool *pgxpool.Pool) (int64, error) diff --git a/migrations/kvalobs/import/import.go b/migrations/kvalobs/import/import.go index 853950ce..785b2a75 100644 --- a/migrations/kvalobs/import/import.go +++ b/migrations/kvalobs/import/import.go @@ -27,7 +27,8 @@ func ImportTable(table *kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, c return 0, err } - fmt.Printf("Number of stations to dump: %d...\n", len(stations)) + importTimespan := config.TimeSpan() + fmt.Printf("Number of stations to import: %d...\n", len(stations)) var rowsInserted int64 for _, station := range stations { stnr, err := strconv.ParseInt(station.Name(), 10, 32) @@ -72,21 +73,23 @@ func ImportTable(table *kvalobs.Table, cache *cache.Cache, pool *pgxpool.Pool, c return } - timespan, err := cache.GetSeriesTimespan(label) + tsTimespan, err := cache.GetSeriesTimespan(label) if err != 
nil { slog.Error(logStr + err.Error()) return } // TODO: figure out where to get fromtime, kvalobs directly? Stinfosys? - tsid, err := lard.GetTimeseriesID(label.ToLard(), timespan, pool) + tsid, err := lard.GetTimeseriesID(label.ToLard(), tsTimespan, pool) if err != nil { slog.Error(logStr + err.Error()) return } filename := filepath.Join(stationDir, file.Name()) - count, err := table.Import(tsid, label, filename, logStr, pool) + // TODO: it's probably better to dump in different directories + // instead of introducing runtime checks + count, err := table.Import(tsid, label, filename, logStr, importTimespan, pool) if err != nil { // Logged inside table.Import return