diff --git a/checkpoint.go b/checkpoint.go index cef9b8f1f5..cf8809794a 100644 --- a/checkpoint.go +++ b/checkpoint.go @@ -262,7 +262,7 @@ func (d *DB) Checkpoint( } } - var excludedFiles map[deletedFileEntry]*fileMetadata + var excludedTables map[deletedFileEntry]*fileMetadata var remoteFiles []base.DiskFileNum // Set of FileBacking.DiskFileNum which will be required by virtual sstables // in the checkpoint. @@ -272,10 +272,10 @@ func (d *DB) Checkpoint( iter := current.Levels[l].Iter() for f := iter.First(); f != nil; f = iter.Next() { if excludeFromCheckpoint(f, opt, d.cmp) { - if excludedFiles == nil { - excludedFiles = make(map[deletedFileEntry]*fileMetadata) + if excludedTables == nil { + excludedTables = make(map[deletedFileEntry]*fileMetadata) } - excludedFiles[deletedFileEntry{ + excludedTables[deletedFileEntry{ Level: l, FileNum: f.FileNum, }] = f @@ -325,7 +325,7 @@ func (d *DB) Checkpoint( ckErr = d.writeCheckpointManifest( fs, formatVers, destDir, dir, manifestFileNum, manifestSize, - excludedFiles, removeBackingTables, + excludedTables, removeBackingTables, ) if ckErr != nil { return ckErr @@ -480,7 +480,7 @@ func (d *DB) writeCheckpointManifest( if len(excludedFiles) > 0 { // Write out an additional VersionEdit that deletes the excluded SST files. ve := versionEdit{ - DeletedFiles: excludedFiles, + DeletedTables: excludedFiles, RemovedBackingTables: removeBackingTables, } diff --git a/compaction.go b/compaction.go index 8e1562c048..36f630e499 100644 --- a/compaction.go +++ b/compaction.go @@ -657,9 +657,9 @@ func (c *compaction) hasExtraLevelData() bool { // this compaction have revisions of the same user key present in both sstables, // when it shouldn't (eg. when splitting flushes). func (c *compaction) errorOnUserKeyOverlap(ve *versionEdit) error { - if n := len(ve.NewFiles); n > 1 { - meta := ve.NewFiles[n-1].Meta - prevMeta := ve.NewFiles[n-2].Meta + if n := len(ve.NewTables); n > 1 { + meta := ve.NewTables[n-1].Meta + prevMeta := ve.NewTables[n-2].Meta if !prevMeta.Largest.IsExclusiveSentinel() && c.cmp(prevMeta.Largest.UserKey, meta.Smallest.UserKey) >= 0 { return errors.Errorf("pebble: compaction split user key across two sstables: %s in %s and %s", @@ -1255,7 +1255,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) { var ingestSplitFiles []ingestSplitFile ingestFlushable := c.flushing[0].flushable.(*ingestedFlushable) - updateLevelMetricsOnExcise := func(m *fileMetadata, level int, added []newFileEntry) { + updateLevelMetricsOnExcise := func(m *fileMetadata, level int, added []newTableEntry) { levelMetrics := c.metrics[level] if levelMetrics == nil { levelMetrics = &LevelMetrics{} @@ -1274,7 +1274,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) { if suggestSplit || ingestFlushable.exciseSpan.Valid() { // We could add deleted files to ve. - ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) + ve.DeletedTables = make(map[manifest.DeletedTableEntry]*manifest.FileMetadata) } ctx := context.Background() @@ -1287,7 +1287,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) { }, v: c.version, } - replacedFiles := make(map[base.FileNum][]newFileEntry) + replacedFiles := make(map[base.FileNum][]newTableEntry) for _, file := range ingestFlushable.files { var fileToSplit *fileMetadata var level int @@ -1312,7 +1312,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) { } // Add the current flushableIngest file to the version. 
- ve.NewFiles = append(ve.NewFiles, newFileEntry{Level: level, Meta: file.FileMetadata}) + ve.NewTables = append(ve.NewTables, newTableEntry{Level: level, Meta: file.FileMetadata}) if fileToSplit != nil { ingestSplitFiles = append(ingestSplitFiles, ingestSplitFile{ ingestFile: file.FileMetadata, @@ -1340,7 +1340,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) { return nil, err } - if _, ok := ve.DeletedFiles[deletedFileEntry{ + if _, ok := ve.DeletedTables[deletedFileEntry{ Level: l, FileNum: m.FileNum, }]; !ok { @@ -1500,8 +1500,8 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { } if err == nil { validateVersionEdit(ve, d.opts.Comparer.ValidateKey, d.opts.Comparer.FormatKey, d.opts.Logger) - for i := range ve.NewFiles { - e := &ve.NewFiles[i] + for i := range ve.NewTables { + e := &ve.NewTables[i] info.Output = append(info.Output, e.Meta.TableInfo()) // Ingested tables are not necessarily flushed to L0. Record the level of // each ingested file explicitly. @@ -1509,7 +1509,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { info.IngestLevels = append(info.IngestLevels, e.Level) } } - if len(ve.NewFiles) == 0 { + if len(ve.NewTables) == 0 { info.Err = errEmptyTable } @@ -1548,7 +1548,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { } } - if len(ve.DeletedFiles) > 0 { + if len(ve.DeletedTables) > 0 { // Iterate through all other compactions, and check if their inputs have // been replaced due to an ingest-time split or excise. In that case, // cancel the compaction. @@ -1556,7 +1556,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { for i := range c2.inputs { iter := c2.inputs[i].files.Iter() for f := iter.First(); f != nil; f = iter.Next() { - if _, ok := ve.DeletedFiles[deletedFileEntry{FileNum: f.FileNum, Level: c2.inputs[i].level}]; ok { + if _, ok := ve.DeletedTables[deletedFileEntry{FileNum: f.FileNum, Level: c2.inputs[i].level}]; ok { c2.cancel.Store(true) break } @@ -1593,7 +1593,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) { flushed = d.mu.mem.queue[:n] d.mu.mem.queue = d.mu.mem.queue[n:] d.updateReadStateLocked(d.opts.DebugCheck) - d.updateTableStatsLocked(ve.NewFiles) + d.updateTableStatsLocked(ve.NewTables) if ingest { d.mu.versions.metrics.Flush.AsIngestCount++ for _, l := range c.metrics { @@ -2293,22 +2293,22 @@ func (d *DB) compact(c *compaction, errChannel chan error) { // // d.mu must be held when calling this method. func (d *DB) cleanupVersionEdit(ve *versionEdit) { - obsoleteFiles := make([]*fileBacking, 0, len(ve.NewFiles)) + obsoleteFiles := make([]*fileBacking, 0, len(ve.NewTables)) deletedFiles := make(map[base.FileNum]struct{}) - for key := range ve.DeletedFiles { + for key := range ve.DeletedTables { deletedFiles[key.FileNum] = struct{}{} } - for i := range ve.NewFiles { - if ve.NewFiles[i].Meta.Virtual { + for i := range ve.NewTables { + if ve.NewTables[i].Meta.Virtual { // We handle backing files separately. continue } - if _, ok := deletedFiles[ve.NewFiles[i].Meta.FileNum]; ok { + if _, ok := deletedFiles[ve.NewTables[i].Meta.FileNum]; ok { // This file is being moved in this ve to a different level. // Don't mark it as obsolete. 
continue } - obsoleteFiles = append(obsoleteFiles, ve.NewFiles[i].Meta.PhysicalMeta().FileBacking) + obsoleteFiles = append(obsoleteFiles, ve.NewTables[i].Meta.PhysicalMeta().FileBacking) } for i := range ve.CreatedBackingTables { if ve.CreatedBackingTables[i].IsUnused() { @@ -2387,8 +2387,8 @@ func (d *DB) compact1(c *compaction, errChannel chan error) (err error) { info.Done = true info.Err = err if err == nil { - for i := range ve.NewFiles { - e := &ve.NewFiles[i] + for i := range ve.NewTables { + e := &ve.NewTables[i] info.Output.Tables = append(info.Output.Tables, e.Meta.TableInfo()) } d.mu.snapshots.cumulativePinnedCount += stats.CumulativePinnedKeys @@ -2415,7 +2415,7 @@ func (d *DB) compact1(c *compaction, errChannel chan error) (err error) { // table list. if err == nil { d.updateReadStateLocked(d.opts.DebugCheck) - d.updateTableStatsLocked(ve.NewFiles) + d.updateTableStatsLocked(ve.NewTables) } d.deleteObsoleteFiles(jobID) @@ -2444,7 +2444,7 @@ func (d *DB) runCopyCompaction( return nil, compact.Stats{}, ErrCancelledCompaction } ve = &versionEdit{ - DeletedFiles: map[deletedFileEntry]*fileMetadata{ + DeletedTables: map[deletedFileEntry]*fileMetadata{ {Level: c.startLevel.level, FileNum: inputMeta.FileNum}: inputMeta, }, } @@ -2592,7 +2592,7 @@ func (d *DB) runCopyCompaction( } deleteOnExit = true } - ve.NewFiles = []newFileEntry{{ + ve.NewTables = []newTableEntry{{ Level: c.outputLevel.level, Meta: newMeta, }} @@ -2626,7 +2626,7 @@ func (d *DB) applyHintOnFile( levelMetrics *LevelMetrics, ve *versionEdit, hintOverlap deletionHintOverlap, -) (newFiles []manifest.NewFileEntry, err error) { +) (newFiles []manifest.NewTableEntry, err error) { if hintOverlap == hintDoesNotApply { return nil, nil } @@ -2634,7 +2634,7 @@ func (d *DB) applyHintOnFile( // The hint overlaps with at least part of the file. if hintOverlap == hintDeletesFile { // The hint deletes the entirety of this file. - ve.DeletedFiles[deletedFileEntry{ + ve.DeletedTables[deletedFileEntry{ Level: level, FileNum: f.FileNum, }] = f @@ -2652,7 +2652,7 @@ func (d *DB) applyHintOnFile( if err != nil { return nil, errors.Wrap(err, "error when running excise for delete-only compaction") } - if _, ok := ve.DeletedFiles[deletedFileEntry{ + if _, ok := ve.DeletedTables[deletedFileEntry{ Level: level, FileNum: f.FileNum, }]; !ok { @@ -2712,7 +2712,7 @@ func (d *DB) runDeleteOnlyCompactionForLevel( if err != nil { return err } - if _, ok := ve.DeletedFiles[manifest.DeletedFileEntry{Level: cl.level, FileNum: curFile.FileNum}]; ok { + if _, ok := ve.DeletedTables[manifest.DeletedTableEntry{Level: cl.level, FileNum: curFile.FileNum}]; ok { curFile = nil } if len(newFiles) > 0 { @@ -2727,7 +2727,7 @@ func (d *DB) runDeleteOnlyCompactionForLevel( break } } - if _, ok := ve.DeletedFiles[deletedFileEntry{ + if _, ok := ve.DeletedTables[deletedFileEntry{ Level: cl.level, FileNum: f.FileNum, }]; !ok { @@ -2787,7 +2787,7 @@ func (d *DB) runDeleteOnlyCompaction( c.metrics = make(map[int]*LevelMetrics, len(c.inputs)) fragments := fragmentDeleteCompactionHints(d.cmp, c.deletionHints) ve = &versionEdit{ - DeletedFiles: map[deletedFileEntry]*fileMetadata{}, + DeletedTables: map[deletedFileEntry]*fileMetadata{}, } for _, cl := range c.inputs { levelMetrics := &LevelMetrics{} @@ -2797,10 +2797,10 @@ func (d *DB) runDeleteOnlyCompaction( c.metrics[cl.level] = levelMetrics } // Remove any files that were added and deleted in the same versionEdit. 
-	ve.NewFiles = slices.DeleteFunc(ve.NewFiles, func(e manifest.NewFileEntry) bool {
-		deletedFileEntry := manifest.DeletedFileEntry{Level: e.Level, FileNum: e.Meta.FileNum}
-		if _, deleted := ve.DeletedFiles[deletedFileEntry]; deleted {
-			delete(ve.DeletedFiles, deletedFileEntry)
+	ve.NewTables = slices.DeleteFunc(ve.NewTables, func(e manifest.NewTableEntry) bool {
+		deletedFileEntry := manifest.DeletedTableEntry{Level: e.Level, FileNum: e.Meta.FileNum}
+		if _, deleted := ve.DeletedTables[deletedFileEntry]; deleted {
+			delete(ve.DeletedTables, deletedFileEntry)
 			return true
 		}
 		return false
 	})
@@ -2808,7 +2808,7 @@ func (d *DB) runDeleteOnlyCompaction(
 	// Remove any entries from CreatedBackingTables that are not used in any
-	// NewFiles.
+	// NewTables.
 	usedBackingFiles := make(map[base.DiskFileNum]struct{})
-	for _, e := range ve.NewFiles {
+	for _, e := range ve.NewTables {
 		if e.Meta.Virtual {
 			usedBackingFiles[e.Meta.FileBacking.DiskFileNum] = struct{}{}
 		}
@@ -2841,10 +2841,10 @@ func (d *DB) runMoveCompaction(
 		},
 	}
 	ve = &versionEdit{
-		DeletedFiles: map[deletedFileEntry]*fileMetadata{
+		DeletedTables: map[deletedFileEntry]*fileMetadata{
 			{Level: c.startLevel.level, FileNum: meta.FileNum}: meta,
 		},
-		NewFiles: []newFileEntry{
+		NewTables: []newTableEntry{
 			{Level: c.outputLevel.level, Meta: meta},
 		},
 	}
@@ -3060,12 +3060,12 @@ func (d *DB) compactAndWrite(
 // tables in compact.Result.
 func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error) {
 	ve := &versionEdit{
-		DeletedFiles: map[deletedFileEntry]*fileMetadata{},
+		DeletedTables: map[deletedFileEntry]*fileMetadata{},
 	}
 	for _, cl := range c.inputs {
 		iter := cl.files.Iter()
 		for f := iter.First(); f != nil; f = iter.Next() {
-			ve.DeletedFiles[deletedFileEntry{
+			ve.DeletedTables[deletedFileEntry{
 				Level:   cl.level,
 				FileNum: f.FileNum,
 			}] = f
@@ -3096,7 +3096,7 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error
 	}

 	inputLargestSeqNumAbsolute := c.inputLargestSeqNumAbsolute()
-	ve.NewFiles = make([]newFileEntry, len(result.Tables))
+	ve.NewTables = make([]newTableEntry, len(result.Tables))
 	for i := range result.Tables {
 		t := &result.Tables[i]
@@ -3134,7 +3134,7 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error
 			fileMeta.ExtendRangeKeyBounds(c.cmp, t.WriterMeta.SmallestRangeKey, t.WriterMeta.LargestRangeKey)
 		}

-		ve.NewFiles[i] = newFileEntry{
+		ve.NewTables[i] = newTableEntry{
 			Level: c.outputLevel.level,
 			Meta:  fileMeta,
 		}
@@ -3154,11 +3154,11 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error
 	}

 	// Sanity check that the tables are ordered and don't overlap.
-	for i := 1; i < len(ve.NewFiles); i++ {
-		if ve.NewFiles[i-1].Meta.UserKeyBounds().End.IsUpperBoundFor(c.cmp, ve.NewFiles[i].Meta.Smallest.UserKey) {
+	for i := 1; i < len(ve.NewTables); i++ {
+		if ve.NewTables[i-1].Meta.UserKeyBounds().End.IsUpperBoundFor(c.cmp, ve.NewTables[i].Meta.Smallest.UserKey) {
 			return nil, base.AssertionFailedf("pebble: compaction output tables overlap: %s and %s",
-				ve.NewFiles[i-1].Meta.DebugString(c.formatKey, true),
-				ve.NewFiles[i].Meta.DebugString(c.formatKey, true),
+				ve.NewTables[i-1].Meta.DebugString(c.formatKey, true),
+				ve.NewTables[i].Meta.DebugString(c.formatKey, true),
 			)
 		}
 	}
@@ -3259,11 +3259,11 @@ func validateVersionEdit(
 	}

 	// Validate both new and deleted files.
- for _, f := range ve.NewFiles { + for _, f := range ve.NewTables { validateKey(f.Meta, f.Meta.Smallest.UserKey) validateKey(f.Meta, f.Meta.Largest.UserKey) } - for _, m := range ve.DeletedFiles { + for _, m := range ve.DeletedTables { validateKey(m, m.Smallest.UserKey) validateKey(m, m.Largest.UserKey) } diff --git a/compaction_test.go b/compaction_test.go index 73440b3371..57c8a940c1 100644 --- a/compaction_test.go +++ b/compaction_test.go @@ -768,7 +768,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "single new file; start key", ve: &versionEdit{ - NewFiles: []manifest.NewFileEntry{ + NewTables: []manifest.NewTableEntry{ { Meta: newFileMeta( manifest.InternalKey{UserKey: []byte(badKey)}, @@ -783,7 +783,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "single new file; end key", ve: &versionEdit{ - NewFiles: []manifest.NewFileEntry{ + NewTables: []manifest.NewTableEntry{ { Meta: newFileMeta( manifest.InternalKey{UserKey: []byte("a")}, @@ -798,7 +798,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "multiple new files", ve: &versionEdit{ - NewFiles: []manifest.NewFileEntry{ + NewTables: []manifest.NewTableEntry{ { Meta: newFileMeta( manifest.InternalKey{UserKey: []byte("a")}, @@ -819,7 +819,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "single deleted file; start key", ve: &versionEdit{ - DeletedFiles: map[manifest.DeletedFileEntry]*manifest.FileMetadata{ + DeletedTables: map[manifest.DeletedTableEntry]*manifest.FileMetadata{ deletedFileEntry{Level: 0, FileNum: 0}: newFileMeta( manifest.InternalKey{UserKey: []byte(badKey)}, manifest.InternalKey{UserKey: []byte("z")}, @@ -832,7 +832,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "single deleted file; end key", ve: &versionEdit{ - DeletedFiles: map[manifest.DeletedFileEntry]*manifest.FileMetadata{ + DeletedTables: map[manifest.DeletedTableEntry]*manifest.FileMetadata{ deletedFileEntry{Level: 0, FileNum: 0}: newFileMeta( manifest.InternalKey{UserKey: []byte("a")}, manifest.InternalKey{UserKey: []byte(badKey)}, @@ -845,7 +845,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "multiple deleted files", ve: &versionEdit{ - DeletedFiles: map[manifest.DeletedFileEntry]*manifest.FileMetadata{ + DeletedTables: map[manifest.DeletedTableEntry]*manifest.FileMetadata{ deletedFileEntry{Level: 0, FileNum: 0}: newFileMeta( manifest.InternalKey{UserKey: []byte("a")}, manifest.InternalKey{UserKey: []byte("c")}, @@ -862,7 +862,7 @@ func TestValidateVersionEdit(t *testing.T) { { desc: "no errors", ve: &versionEdit{ - NewFiles: []manifest.NewFileEntry{ + NewTables: []manifest.NewTableEntry{ { Level: 0, Meta: newFileMeta( @@ -878,7 +878,7 @@ func TestValidateVersionEdit(t *testing.T) { ), }, }, - DeletedFiles: map[manifest.DeletedFileEntry]*manifest.FileMetadata{ + DeletedTables: map[manifest.DeletedTableEntry]*manifest.FileMetadata{ deletedFileEntry{Level: 6, FileNum: 0}: newFileMeta( manifest.InternalKey{UserKey: []byte("a")}, manifest.InternalKey{UserKey: []byte("d")}, @@ -2043,19 +2043,19 @@ func TestCompactionErrorOnUserKeyOverlap(t *testing.T) { comparer: DefaultComparer, formatKey: DefaultComparer.FormatKey, } - var files []manifest.NewFileEntry + var files []manifest.NewTableEntry fileNum := base.FileNum(1) for _, data := range strings.Split(d.Input, "\n") { meta := parseMeta(data) meta.FileNum = fileNum fileNum++ - files = append(files, manifest.NewFileEntry{Level: 1, Meta: meta}) + files = append(files, manifest.NewTableEntry{Level: 1, Meta: meta}) } result := "OK" ve := &versionEdit{ - 
NewFiles: files, + NewTables: files, } if err := c.errorOnUserKeyOverlap(ve); err != nil { result = fmt.Sprint(err) diff --git a/data_test.go b/data_test.go index 10d803f9c2..305c7fb534 100644 --- a/data_test.go +++ b/data_test.go @@ -886,7 +886,7 @@ func runDBDefineCmdReuseFS(td *datadriven.TestData, opts *Options) (*DB, error) return err } largestSeqNum := d.mu.versions.logSeqNum.Load() - for _, f := range newVE.NewFiles { + for _, f := range newVE.NewTables { if start != nil { f.Meta.SmallestPointKey = *start f.Meta.Smallest = *start @@ -898,7 +898,7 @@ func runDBDefineCmdReuseFS(td *datadriven.TestData, opts *Options) (*DB, error) if largestSeqNum <= f.Meta.LargestSeqNum { largestSeqNum = f.Meta.LargestSeqNum + 1 } - ve.NewFiles = append(ve.NewFiles, newFileEntry{ + ve.NewTables = append(ve.NewTables, newTableEntry{ Level: level, Meta: f.Meta, }) @@ -1043,16 +1043,16 @@ func runDBDefineCmdReuseFS(td *datadriven.TestData, opts *Options) (*DB, error) return nil, err } - if len(ve.NewFiles) > 0 { + if len(ve.NewTables) > 0 { jobID := d.newJobIDLocked() d.mu.versions.logLock() - if err := d.mu.versions.logAndApply(jobID, ve, newFileMetrics(ve.NewFiles), false, func() []compactionInfo { + if err := d.mu.versions.logAndApply(jobID, ve, newFileMetrics(ve.NewTables), false, func() []compactionInfo { return nil }); err != nil { return nil, err } d.updateReadStateLocked(nil) - d.updateTableStatsLocked(ve.NewFiles) + d.updateTableStatsLocked(ve.NewTables) } return d, nil diff --git a/db.go b/db.go index 342604cb11..d37d6d6c50 100644 --- a/db.go +++ b/db.go @@ -492,7 +492,7 @@ type DB struct { // Compactions, ingests, flushes append files to be processed. An // active stat collection goroutine clears the list and processes // them. - pending []manifest.NewFileEntry + pending []manifest.NewTableEntry } tableValidation struct { @@ -502,7 +502,7 @@ type DB struct { // pending is a slice of metadata for sstables waiting to be // validated. Only physical sstables should be added to the pending // queue. - pending []newFileEntry + pending []newTableEntry // validating is set to true when validation is running. validating bool } diff --git a/file_cache_test.go b/file_cache_test.go index dd15e020ce..b36e86546c 100644 --- a/file_cache_test.go +++ b/file_cache_test.go @@ -372,8 +372,8 @@ func TestVirtualReadsWiring(t *testing.T) { // Write the version edit. 
fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics { - metrics := newFileMetrics(ve.NewFiles) - for de, f := range ve.DeletedFiles { + metrics := newFileMetrics(ve.NewTables) + for de, f := range ve.DeletedTables { lm := metrics[de.Level] if lm == nil { lm = &LevelMetrics{} @@ -397,14 +397,14 @@ func TestVirtualReadsWiring(t *testing.T) { } ve := manifest.VersionEdit{} - d1 := manifest.DeletedFileEntry{Level: 6, FileNum: parentFile.FileNum} - n1 := manifest.NewFileEntry{Level: 6, Meta: v1} - n2 := manifest.NewFileEntry{Level: 6, Meta: v2} - - ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) - ve.DeletedFiles[d1] = parentFile - ve.NewFiles = append(ve.NewFiles, n1) - ve.NewFiles = append(ve.NewFiles, n2) + d1 := manifest.DeletedTableEntry{Level: 6, FileNum: parentFile.FileNum} + n1 := manifest.NewTableEntry{Level: 6, Meta: v1} + n2 := manifest.NewTableEntry{Level: 6, Meta: v2} + + ve.DeletedTables = make(map[manifest.DeletedTableEntry]*manifest.FileMetadata) + ve.DeletedTables[d1] = parentFile + ve.NewTables = append(ve.NewTables, n1) + ve.NewTables = append(ve.NewTables, n2) ve.CreatedBackingTables = append(ve.CreatedBackingTables, parentFile.FileBacking) require.NoError(t, applyVE(&ve)) diff --git a/ingest.go b/ingest.go index ac4efb9dec..fbc399d733 100644 --- a/ingest.go +++ b/ingest.go @@ -1767,9 +1767,9 @@ func (d *DB) ingest( info.Tables = make([]struct { TableInfo Level int - }, len(ve.NewFiles)) - for i := range ve.NewFiles { - e := &ve.NewFiles[i] + }, len(ve.NewTables)) + for i := range ve.NewTables { + e := &ve.NewTables[i] info.Tables[i].Level = e.Level info.Tables[i].TableInfo = e.Meta.TableInfo() stats.Bytes += e.Meta.Size @@ -1819,14 +1819,14 @@ func (d *DB) ingest( // the mutex is not held. func (d *DB) excise( ctx context.Context, exciseSpan base.UserKeyBounds, m *fileMetadata, ve *versionEdit, level int, -) ([]manifest.NewFileEntry, error) { +) ([]manifest.NewTableEntry, error) { numCreatedFiles := 0 // Check if there's actually an overlap between m and exciseSpan. mBounds := base.UserKeyBoundsFromInternal(m.Smallest, m.Largest) if !exciseSpan.Overlaps(d.cmp, &mBounds) { return nil, nil } - ve.DeletedFiles[deletedFileEntry{ + ve.DeletedTables[deletedFileEntry{ Level: level, FileNum: m.FileNum, }] = m @@ -1950,7 +1950,7 @@ func (d *DB) excise( return nil, err } leftFile.ValidateVirtual(m) - ve.NewFiles = append(ve.NewFiles, newFileEntry{Level: level, Meta: leftFile}) + ve.NewTables = append(ve.NewTables, newTableEntry{Level: level, Meta: leftFile}) needsBacking = true numCreatedFiles++ } @@ -1965,7 +1965,7 @@ func (d *DB) excise( // indicated by the VersionEdit.CreatedBackingTables invariant. ve.CreatedBackingTables = append(ve.CreatedBackingTables, m.FileBacking) } - return ve.NewFiles[len(ve.NewFiles)-numCreatedFiles:], nil + return ve.NewTables[len(ve.NewTables)-numCreatedFiles:], nil } // Create a new file, rightFile, between [firstKeyAfter(exciseSpan.End), m.Largest]. 
 	//
@@ -2064,7 +2064,7 @@ func (d *DB) excise(
 			return nil, err
 		}
 		rightFile.ValidateVirtual(m)
-		ve.NewFiles = append(ve.NewFiles, newFileEntry{Level: level, Meta: rightFile})
+		ve.NewTables = append(ve.NewTables, newTableEntry{Level: level, Meta: rightFile})
 		needsBacking = true
 		numCreatedFiles++
 	}
@@ -2077,7 +2077,7 @@ func (d *DB) excise(
 		ve.CreatedBackingTables = append(ve.CreatedBackingTables, m.FileBacking)
 	}

-	return ve.NewFiles[len(ve.NewFiles)-numCreatedFiles:], nil
+	return ve.NewTables[len(ve.NewTables)-numCreatedFiles:], nil
 }

 type ingestSplitFile struct {
@@ -2100,9 +2100,9 @@ type ingestSplitFile struct {
 func (d *DB) ingestSplit(
 	ctx context.Context,
 	ve *versionEdit,
-	updateMetrics func(*fileMetadata, int, []newFileEntry),
+	updateMetrics func(*fileMetadata, int, []newTableEntry),
 	files []ingestSplitFile,
-	replacedFiles map[base.FileNum][]newFileEntry,
+	replacedFiles map[base.FileNum][]newTableEntry,
 ) error {
 	for _, s := range files {
 		ingestFileBounds := s.ingestFile.UserKeyBounds()
@@ -2168,7 +2168,7 @@ func (d *DB) ingestSplit(
 		if err != nil {
 			return err
 		}
-		if _, ok := ve.DeletedFiles[deletedFileEntry{
+		if _, ok := ve.DeletedTables[deletedFileEntry{
 			Level:   s.level,
 			FileNum: splitFile.FileNum,
 		}]; !ok {
@@ -2185,17 +2185,17 @@ func (d *DB) ingestSplit(
 	}
-	// Flatten the version edit by removing any entries from ve.NewFiles that
-	// are also in ve.DeletedFiles.
-	newNewFiles := ve.NewFiles[:0]
-	for i := range ve.NewFiles {
-		fn := ve.NewFiles[i].Meta.FileNum
-		deEntry := deletedFileEntry{Level: ve.NewFiles[i].Level, FileNum: fn}
-		if _, ok := ve.DeletedFiles[deEntry]; ok {
-			delete(ve.DeletedFiles, deEntry)
+	// Flatten the version edit by removing any entries from ve.NewTables that
+	// are also in ve.DeletedTables.
+	newNewFiles := ve.NewTables[:0]
+	for i := range ve.NewTables {
+		fn := ve.NewTables[i].Meta.FileNum
+		deEntry := deletedFileEntry{Level: ve.NewTables[i].Level, FileNum: fn}
+		if _, ok := ve.DeletedTables[deEntry]; ok {
+			delete(ve.DeletedTables, deEntry)
 		} else {
-			newNewFiles = append(newNewFiles, ve.NewFiles[i])
+			newNewFiles = append(newNewFiles, ve.NewTables[i])
 		}
 	}
-	ve.NewFiles = newNewFiles
+	ve.NewTables = newNewFiles
 	return nil
 }

@@ -2211,10 +2211,10 @@ func (d *DB) ingestApply(
 	defer d.mu.Unlock()

 	ve := &versionEdit{
-		NewFiles: make([]newFileEntry, lr.fileCount()),
+		NewTables: make([]newTableEntry, lr.fileCount()),
 	}
 	if exciseSpan.Valid() || (d.opts.Experimental.IngestSplit != nil && d.opts.Experimental.IngestSplit()) {
-		ve.DeletedFiles = map[manifest.DeletedFileEntry]*manifest.FileMetadata{}
+		ve.DeletedTables = map[manifest.DeletedTableEntry]*manifest.FileMetadata{}
 	}
 	metrics := make(map[int]*LevelMetrics)

@@ -2291,7 +2291,7 @@ func (d *DB) ingestApply(
 			ve.CreatedBackingTables = append(ve.CreatedBackingTables, m.FileBacking)
 		}

-		f := &ve.NewFiles[i]
+		f := &ve.NewTables[i]
 		var err error
 		if specifiedLevel != -1 {
 			f.Level = specifiedLevel
@@ -2367,8 +2367,8 @@ func (d *DB) ingestApply(
 	// possible for a file that we want to split to no longer exist or have a
 	// newer fileMetadata due to a split induced by another ingestion file, or an
 	// excise.
- replacedFiles := make(map[base.FileNum][]newFileEntry) - updateLevelMetricsOnExcise := func(m *fileMetadata, level int, added []newFileEntry) { + replacedFiles := make(map[base.FileNum][]newTableEntry) + updateLevelMetricsOnExcise := func(m *fileMetadata, level int, added []newTableEntry) { levelMetrics := metrics[level] if levelMetrics == nil { levelMetrics = &LevelMetrics{} @@ -2407,7 +2407,7 @@ func (d *DB) ingestApply( return nil, err } - if _, ok := ve.DeletedFiles[deletedFileEntry{ + if _, ok := ve.DeletedTables[deletedFileEntry{ Level: level, FileNum: m.FileNum, }]; !ok { @@ -2499,13 +2499,13 @@ func (d *DB) ingestApply( // updateReadStateLocked could have generated obsolete tables, schedule a // cleanup job if necessary. d.deleteObsoleteFiles(jobID) - d.updateTableStatsLocked(ve.NewFiles) + d.updateTableStatsLocked(ve.NewTables) // The ingestion may have pushed a level over the threshold for compaction, // so check to see if one is necessary and schedule it. d.maybeScheduleCompaction() - var toValidate []manifest.NewFileEntry + var toValidate []manifest.NewTableEntry dedup := make(map[base.DiskFileNum]struct{}) - for _, entry := range ve.NewFiles { + for _, entry := range ve.NewTables { if _, ok := dedup[entry.Meta.FileBacking.DiskFileNum]; !ok { toValidate = append(toValidate, entry) dedup[entry.Meta.FileBacking.DiskFileNum] = struct{}{} @@ -2515,14 +2515,14 @@ func (d *DB) ingestApply( return ve, nil } -// maybeValidateSSTablesLocked adds the slice of newFileEntrys to the pending +// maybeValidateSSTablesLocked adds the slice of newTableEntrys to the pending // queue of files to be validated, when the feature is enabled. // // Note that if two entries with the same backing file are added twice, then the // block checksums for the backing file will be validated twice. // // DB.mu must be locked when calling. -func (d *DB) maybeValidateSSTablesLocked(newFiles []newFileEntry) { +func (d *DB) maybeValidateSSTablesLocked(newFiles []newTableEntry) { // Only add to the validation queue when the feature is enabled. if !d.opts.Experimental.ValidateOnIngest { return @@ -2568,7 +2568,7 @@ func (d *DB) validateSSTables() { // If we fail to validate any files due to reasons other than uncovered // corruption, accumulate them and re-queue them for another attempt. - var retry []manifest.NewFileEntry + var retry []manifest.NewTableEntry for _, f := range pending { // The file may have been moved or deleted since it was ingested, in diff --git a/ingest_test.go b/ingest_test.go index 93ab1bced4..f4176cff83 100644 --- a/ingest_test.go +++ b/ingest_test.go @@ -867,7 +867,7 @@ func TestExcise(t *testing.T) { case "excise": ve := &versionEdit{ - DeletedFiles: map[deletedFileEntry]*fileMetadata{}, + DeletedTables: map[deletedFileEntry]*fileMetadata{}, } var exciseSpan KeyRange if len(td.CmdArgs) != 2 { @@ -893,7 +893,7 @@ func TestExcise(t *testing.T) { d.mu.Lock() d.mu.versions.logUnlock() d.mu.Unlock() - return fmt.Sprintf("would excise %d files, use ingest-and-excise to excise.\n%s", len(ve.DeletedFiles), ve.DebugString(base.DefaultFormatter)) + return fmt.Sprintf("would excise %d files, use ingest-and-excise to excise.\n%s", len(ve.DeletedTables), ve.DebugString(base.DefaultFormatter)) case "confirm-backing": // Confirms that the files have the same FileBacking. 
@@ -1219,7 +1219,7 @@ func testIngestSharedImpl( case "excise": ve := &versionEdit{ - DeletedFiles: map[deletedFileEntry]*fileMetadata{}, + DeletedTables: map[deletedFileEntry]*fileMetadata{}, } var exciseSpan KeyRange if len(td.CmdArgs) != 2 { @@ -1247,7 +1247,7 @@ func testIngestSharedImpl( d.mu.Lock() d.mu.versions.logUnlock() d.mu.Unlock() - return fmt.Sprintf("would excise %d files, use ingest-and-excise to excise.\n%s", len(ve.DeletedFiles), ve.String()) + return fmt.Sprintf("would excise %d files, use ingest-and-excise to excise.\n%s", len(ve.DeletedTables), ve.String()) case "file-only-snapshot": if len(td.CmdArgs) != 1 { @@ -1721,7 +1721,7 @@ func TestConcurrentExcise(t *testing.T) { case "excise": ve := &versionEdit{ - DeletedFiles: map[deletedFileEntry]*fileMetadata{}, + DeletedTables: map[deletedFileEntry]*fileMetadata{}, } var exciseSpan KeyRange if len(td.CmdArgs) != 2 { @@ -1749,7 +1749,7 @@ func TestConcurrentExcise(t *testing.T) { d.mu.Lock() d.mu.versions.logUnlock() d.mu.Unlock() - return fmt.Sprintf("would excise %d files, use ingest-and-excise to excise.\n%s", len(ve.DeletedFiles), ve.String()) + return fmt.Sprintf("would excise %d files, use ingest-and-excise to excise.\n%s", len(ve.DeletedTables), ve.String()) case "file-only-snapshot": if len(td.CmdArgs) != 1 { @@ -3156,7 +3156,7 @@ func TestIngestMemtableOverlapRace(t *testing.T) { var ve manifest.VersionEdit require.NoError(t, ve.Decode(r)) t.Log(ve.String()) - for _, f := range ve.NewFiles { + for _, f := range ve.NewTables { if largest != nil { require.Equal(t, 0, f.Level) if largest.LargestSeqNum > f.Meta.LargestSeqNum { diff --git a/internal/keyspan/keyspanimpl/level_iter_test.go b/internal/keyspan/keyspanimpl/level_iter_test.go index decff7ca91..0239f0ee44 100644 --- a/internal/keyspan/keyspanimpl/level_iter_test.go +++ b/internal/keyspan/keyspanimpl/level_iter_test.go @@ -311,7 +311,7 @@ func TestLevelIterEquivalence(t *testing.T) { for i := range metas { amap[metas[i].FileNum] = metas[i] } - b.Added[6] = amap + b.AddedTables[6] = amap v, err := b.Apply(nil, base.DefaultComparer, 0, 0) require.NoError(t, err) levelIter.Init( diff --git a/internal/manifest/l0_sublevels_test.go b/internal/manifest/l0_sublevels_test.go index 8cdf199bbb..13e23d7f69 100644 --- a/internal/manifest/l0_sublevels_test.go +++ b/internal/manifest/l0_sublevels_test.go @@ -47,7 +47,7 @@ func readManifest(filename string) (*Version, error) { return nil, err } var bve BulkVersionEdit - bve.AddedByFileNum = addedByFileNum + bve.AddedTablesByFileNum = addedByFileNum if err := bve.Accumulate(&ve); err != nil { return nil, err } diff --git a/internal/manifest/manifest_test.go b/internal/manifest/manifest_test.go index cbca5f80ce..0a89b8a7b0 100644 --- a/internal/manifest/manifest_test.go +++ b/internal/manifest/manifest_test.go @@ -114,7 +114,7 @@ func replayManifest(t *testing.T, opts *pebble.Options, dirname string) *manifes cmp := opts.Comparer var bve manifest.BulkVersionEdit - bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata) + bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata) rr := record.NewReader(f, 0 /* logNum */) for { r, err := rr.Next() diff --git a/internal/manifest/version_edit.go b/internal/manifest/version_edit.go index 79e7ee5891..71d971026e 100644 --- a/internal/manifest/version_edit.go +++ b/internal/manifest/version_edit.go @@ -74,16 +74,16 @@ const ( customTagBlobReferences = 69 ) -// DeletedFileEntry holds the state for a file deletion from a level. 
The file
-// itself might still be referenced by another level.
-type DeletedFileEntry struct {
+// DeletedTableEntry holds the state for an sstable deletion from a level. The
+// table itself might still be referenced by another level.
+type DeletedTableEntry struct {
 	Level   int
 	FileNum base.FileNum
 }

-// NewFileEntry holds the state for a new file or one moved from a different
+// NewTableEntry holds the state for a new sstable or one moved from a different
 // level.
-type NewFileEntry struct {
+type NewTableEntry struct {
 	Level int
 	Meta  *FileMetadata
 	// BackingFileNum is only set during manifest replay, and only for virtual
@@ -124,8 +124,8 @@ type VersionEdit struct {
 	// A file num may be present in both deleted files and new files when it
 	// is moved from a lower level to a higher level (when the compaction
 	// found that there was no overlapping file at the higher level).
-	DeletedFiles map[DeletedFileEntry]*FileMetadata
-	NewFiles     []NewFileEntry
+	DeletedTables map[DeletedTableEntry]*FileMetadata
+	NewTables     []NewTableEntry
 	// CreatedBackingTables can be used to preserve the FileBacking associated
 	// with a physical sstable. This is useful when virtual sstables in the
 	// latest version are reconstructed during manifest replay, and we also need
@@ -252,10 +252,10 @@ func (v *VersionEdit) Decode(r io.Reader) error {
 			if err != nil {
 				return err
 			}
-			if v.DeletedFiles == nil {
-				v.DeletedFiles = make(map[DeletedFileEntry]*FileMetadata)
+			if v.DeletedTables == nil {
+				v.DeletedTables = make(map[DeletedTableEntry]*FileMetadata)
 			}
-			v.DeletedFiles[DeletedFileEntry{level, fileNum}] = nil
+			v.DeletedTables[DeletedTableEntry{level, fileNum}] = nil

 		case tagNewFile, tagNewFile2, tagNewFile3, tagNewFile4, tagNewFile5:
 			level, err := d.readLevel()
@@ -487,14 +487,14 @@ func (v *VersionEdit) Decode(r io.Reader) error {
 				m.InitPhysicalBacking()
 			}

-			nfe := NewFileEntry{
+			nfe := NewTableEntry{
 				Level: level,
 				Meta:  m,
 			}
 			if virtualState.virtual {
 				nfe.BackingFileNum = base.DiskFileNum(virtualState.backingFileNum)
 			}
-			v.NewFiles = append(v.NewFiles, nfe)
+			v.NewTables = append(v.NewTables, nfe)

 		case tagNewBlobFile:
 			fileNum, err := d.readFileNum()
@@ -561,11 +561,11 @@ func (v *VersionEdit) string(verbose bool, fmtKey base.FormatKey) string {
 	if v.LastSeqNum != 0 {
 		fmt.Fprintf(&buf, " last-seq-num: %d\n", v.LastSeqNum)
 	}
-	entries := make([]DeletedFileEntry, 0, len(v.DeletedFiles))
-	for df := range v.DeletedFiles {
+	entries := make([]DeletedTableEntry, 0, len(v.DeletedTables))
+	for df := range v.DeletedTables {
 		entries = append(entries, df)
 	}
-	slices.SortFunc(entries, func(a, b DeletedFileEntry) int {
+	slices.SortFunc(entries, func(a, b DeletedTableEntry) int {
 		if v := stdcmp.Compare(a.Level, b.Level); v != 0 {
 			return v
 		}
@@ -574,7 +574,7 @@ func (v *VersionEdit) string(verbose bool, fmtKey base.FormatKey) string {
 	for _, df := range entries {
 		fmt.Fprintf(&buf, " del-table: L%d %s\n", df.Level, df.FileNum)
 	}
-	for _, nf := range v.NewFiles {
+	for _, nf := range v.NewTables {
 		fmt.Fprintf(&buf, " add-table: L%d", nf.Level)
 		fmt.Fprintf(&buf, " %s", nf.Meta.DebugString(fmtKey, verbose))
 		if nf.Meta.CreationTime != 0 {
@@ -640,7 +640,7 @@ func ParseVersionEditDebug(s string) (_ *VersionEdit, err error) {
 			if err != nil {
 				return nil, err
 			}
-			ve.NewFiles = append(ve.NewFiles, NewFileEntry{
+			ve.NewTables = append(ve.NewTables, NewTableEntry{
 				Level: level,
 				Meta:  meta,
 			})

 		case "del-table":
 			level := p.Level()
 			num := p.FileNum()
-			if ve.DeletedFiles == nil {
-				ve.DeletedFiles = make(map[DeletedFileEntry]*FileMetadata)
+			if ve.DeletedTables == nil {
+				ve.DeletedTables = make(map[DeletedTableEntry]*FileMetadata)
 			}
-			ve.DeletedFiles[DeletedFileEntry{
+			ve.DeletedTables[DeletedTableEntry{
 				Level:   level,
 				FileNum: num,
 			}] = nil
@@ -720,12 +720,12 @@ func (v *VersionEdit) Encode(w io.Writer) error {
 		e.writeUvarint(tagLastSequence)
 		e.writeUvarint(uint64(v.LastSeqNum))
 	}
-	for x := range v.DeletedFiles {
+	for x := range v.DeletedTables {
 		e.writeUvarint(tagDeletedFile)
 		e.writeUvarint(uint64(x.Level))
 		e.writeUvarint(uint64(x.FileNum))
 	}
-	for _, x := range v.NewFiles {
+	for _, x := range v.NewTables {
 		customFields := x.Meta.MarkedForCompaction || x.Meta.CreationTime != 0 || x.Meta.Virtual || len(x.Meta.BlobReferences) > 0
 		var tag uint64
 		switch {
@@ -913,24 +913,24 @@ func (e versionEditEncoder) writeUvarint(u uint64) {
 // before (in a prior version edit). Note that a given file can be deleted from
 // a level and added to another level in a single version edit
 type BulkVersionEdit struct {
-	Added   [NumLevels]map[base.FileNum]*FileMetadata
-	Deleted [NumLevels]map[base.FileNum]*FileMetadata
+	AddedTables   [NumLevels]map[base.FileNum]*FileMetadata
+	DeletedTables [NumLevels]map[base.FileNum]*FileMetadata

 	// AddedFileBacking is a map to support lookup so that we can populate the
 	// FileBacking of virtual sstables during manifest replay.
 	AddedFileBacking   map[base.DiskFileNum]*FileBacking
 	RemovedFileBacking []base.DiskFileNum

-	// AddedByFileNum maps file number to file metadata for all added files
-	// from accumulated version edits. AddedByFileNum is only populated if set
-	// to non-nil by a caller. It must be set to non-nil when replaying
-	// version edits read from a MANIFEST (as opposed to VersionEdits
+	// AddedTablesByFileNum maps file number to file metadata for all added
+	// files from accumulated version edits. AddedTablesByFileNum is only
+	// populated if set to non-nil by a caller. It must be set to non-nil when
+	// replaying version edits read from a MANIFEST (as opposed to VersionEdits
 	// constructed in-memory). While replaying a MANIFEST file,
-	// VersionEdit.DeletedFiles map entries have nil values, because the
-	// on-disk deletion record encodes only the file number. Accumulate
-	// uses AddedByFileNum to correctly populate the BulkVersionEdit's Deleted
+	// VersionEdit.DeletedTables map entries have nil values, because the on-disk
+	// deletion record encodes only the file number. Accumulate uses
+	// AddedTablesByFileNum to correctly populate the BulkVersionEdit's DeletedTables
 	// field with non-nil *FileMetadata.
-	AddedByFileNum map[base.FileNum]*FileMetadata
+	AddedTablesByFileNum map[base.FileNum]*FileMetadata

 	// MarkedForCompactionCountDiff holds the aggregated count of files
 	// marked for compaction added or removed.
@@ -953,19 +953,19 @@ type BulkVersionEdit struct {
 // of the accumulation, because we need to decrease the refcount of the
 // deleted file in Apply.
 func (b *BulkVersionEdit) Accumulate(ve *VersionEdit) error {
-	for df, m := range ve.DeletedFiles {
-		dmap := b.Deleted[df.Level]
+	for df, m := range ve.DeletedTables {
+		dmap := b.DeletedTables[df.Level]
 		if dmap == nil {
 			dmap = make(map[base.FileNum]*FileMetadata)
-			b.Deleted[df.Level] = dmap
+			b.DeletedTables[df.Level] = dmap
 		}
 		if m == nil {
 			// m is nil only when replaying a MANIFEST.
-			if b.AddedByFileNum == nil {
-				return errors.Errorf("deleted file L%d.%s's metadata is absent and bve.AddedByFileNum is nil", df.Level, df.FileNum)
+			if b.AddedTablesByFileNum == nil {
+				return errors.Errorf("deleted file L%d.%s's metadata is absent and bve.AddedTablesByFileNum is nil", df.Level, df.FileNum)
 			}
-			m = b.AddedByFileNum[df.FileNum]
+			m = b.AddedTablesByFileNum[df.FileNum]
 			if m == nil {
 				return base.CorruptionErrorf("pebble: file deleted L%d.%s before it was inserted", df.Level, df.FileNum)
 			}
@@ -973,11 +973,11 @@ func (b *BulkVersionEdit) Accumulate(ve *VersionEdit) error {
 		if m.MarkedForCompaction {
 			b.MarkedForCompactionCountDiff--
 		}
-		if _, ok := b.Added[df.Level][df.FileNum]; !ok {
+		if _, ok := b.AddedTables[df.Level][df.FileNum]; !ok {
 			dmap[df.FileNum] = m
 		} else {
-			// Present in b.Added for the same level.
-			delete(b.Added[df.Level], df.FileNum)
+			// Present in b.AddedTables for the same level.
+			delete(b.AddedTables[df.Level], df.FileNum)
 		}
 	}

@@ -997,10 +997,10 @@ func (b *BulkVersionEdit) Accumulate(ve *VersionEdit) error {
 		b.AddedFileBacking[fb.DiskFileNum] = fb
 	}

-	for _, nf := range ve.NewFiles {
+	for _, nf := range ve.NewTables {
 		// A new file should not have been deleted in this or a preceding
 		// VersionEdit at the same level (though files can move across levels).
-		if dmap := b.Deleted[nf.Level]; dmap != nil {
+		if dmap := b.DeletedTables[nf.Level]; dmap != nil {
 			if _, ok := dmap[nf.Meta.FileNum]; ok {
 				return base.CorruptionErrorf("pebble: file deleted L%d.%s before it was inserted", nf.Level, nf.Meta.FileNum)
 			}
@@ -1016,12 +1016,12 @@ func (b *BulkVersionEdit) Accumulate(ve *VersionEdit) error {
 			return errors.Errorf("Added file L%d.%s's has no FileBacking", nf.Level, nf.Meta.FileNum)
 		}

-		if b.Added[nf.Level] == nil {
-			b.Added[nf.Level] = make(map[base.FileNum]*FileMetadata)
+		if b.AddedTables[nf.Level] == nil {
+			b.AddedTables[nf.Level] = make(map[base.FileNum]*FileMetadata)
 		}
-		b.Added[nf.Level][nf.Meta.FileNum] = nf.Meta
-		if b.AddedByFileNum != nil {
-			b.AddedByFileNum[nf.Meta.FileNum] = nf.Meta
+		b.AddedTables[nf.Level][nf.Meta.FileNum] = nf.Meta
+		if b.AddedTablesByFileNum != nil {
+			b.AddedTablesByFileNum[nf.Meta.FileNum] = nf.Meta
 		}
 		if nf.Meta.MarkedForCompaction {
 			b.MarkedForCompactionCountDiff++
@@ -1076,7 +1076,7 @@ func (b *BulkVersionEdit) Apply(
 			v.RangeKeyLevels[level] = curr.RangeKeyLevels[level].clone()
 		}

-		if len(b.Added[level]) == 0 && len(b.Deleted[level]) == 0 {
+		if len(b.AddedTables[level]) == 0 && len(b.DeletedTables[level]) == 0 {
 			// There are no edits on this level.
 			if level == 0 {
 				// Initialize L0Sublevels.
@@ -1096,19 +1096,19 @@ func (b *BulkVersionEdit) Apply(

 		lm := &v.Levels[level]
 		lmRange := &v.RangeKeyLevels[level]

-		addedFilesMap := b.Added[level]
-		deletedFilesMap := b.Deleted[level]
-		if n := v.Levels[level].Len() + len(addedFilesMap); n == 0 {
+		addedTablesMap := b.AddedTables[level]
+		deletedTablesMap := b.DeletedTables[level]
+		if n := v.Levels[level].Len() + len(addedTablesMap); n == 0 {
 			return nil, base.CorruptionErrorf(
 				"pebble: internal error: No current or added files but have deleted files: %d",
-				errors.Safe(len(deletedFilesMap)))
+				errors.Safe(len(deletedTablesMap)))
 		}

-		// NB: addedFilesMap may be empty. If a file is present in addedFilesMap
-		// for a level, it won't be present in deletedFilesMap for the same
-		// level.
-		for _, f := range deletedFilesMap {
+		// NB: addedTablesMap may be empty. If a file is present in addedTablesMap
+		// for a level, it won't be present in deletedTablesMap for the same
+		// level.
+		for _, f := range deletedTablesMap {
 			if obsolete := v.Levels[level].remove(f); obsolete {
 				// Deleting a file from the B-Tree may decrement its
 				// reference count. However, because we cloned the
 			}
 		}

-		addedFiles := make([]*FileMetadata, 0, len(addedFilesMap))
-		for _, f := range addedFilesMap {
-			addedFiles = append(addedFiles, f)
+		addedTables := make([]*FileMetadata, 0, len(addedTablesMap))
+		for _, f := range addedTablesMap {
+			addedTables = append(addedTables, f)
 		}
-		// Sort addedFiles by file number. This isn't necessary, but tests which
+		// Sort addedTables by file number. This isn't necessary, but tests which
 		// replay invalid manifests check the error output, and the error output
 		// depends on the order in which files are added to the btree.
-		slices.SortFunc(addedFiles, func(a, b *FileMetadata) int {
+		slices.SortFunc(addedTables, func(a, b *FileMetadata) int {
 			return stdcmp.Compare(a.FileNum, b.FileNum)
 		})
 		var sm, la *FileMetadata
-		for _, f := range addedFiles {
+		for _, f := range addedTables {
 			// NB: allowedSeeks is used for read triggered compactions. It is set using
 			// Options.Experimental.ReadCompactionRate which defaults to 32KB.
 			var allowedSeeks int64
@@ -1175,14 +1175,14 @@ func (b *BulkVersionEdit) Apply(
 		}

 		if level == 0 {
-			if curr != nil && curr.L0Sublevels != nil && len(deletedFilesMap) == 0 {
+			if curr != nil && curr.L0Sublevels != nil && len(deletedTablesMap) == 0 {
 				// Flushes and ingestions that do not delete any L0 files do not require
 				// a regeneration of L0Sublevels from scratch. We can instead generate
 				// it incrementally.
 				var err error
-				// AddL0Files requires addedFiles to be sorted in seqnum order.
-				SortBySeqNum(addedFiles)
-				v.L0Sublevels, err = curr.L0Sublevels.AddL0Files(addedFiles, flushSplitBytes, &v.Levels[0])
+				// AddL0Files requires addedTables to be sorted in seqnum order.
+				SortBySeqNum(addedTables)
+				v.L0Sublevels, err = curr.L0Sublevels.AddL0Files(addedTables, flushSplitBytes, &v.Levels[0])
 				if errors.Is(err, errInvalidL0SublevelsOpt) {
 					err = v.InitL0Sublevels(flushSplitBytes)
 				} else if invariants.Enabled && err == nil {
@@ -1215,8 +1215,8 @@ func (b *BulkVersionEdit) Apply(
 		// Check consistency of the level in the vicinity of our edits.
 		if sm != nil && la != nil {
 			overlap := v.Levels[level].Slice().Overlaps(comparer.Compare, sm.UserKeyBounds())
-			// overlap contains all of the added files. We want to ensure that
-			// the added files are consistent with neighboring existing files
+			// overlap contains all of the added tables. We want to ensure that
+			// the added tables are consistent with neighboring existing tables
 			// too, so reslice the overlap to pull in a neighbor on each side.
check := overlap.Reslice(func(start, end *LevelIterator) { if m := start.Prev(); m == nil { diff --git a/internal/manifest/version_edit_test.go b/internal/manifest/version_edit_test.go index d12071f59c..9d6d0b56e4 100644 --- a/internal/manifest/version_edit_test.go +++ b/internal/manifest/version_edit_test.go @@ -91,7 +91,7 @@ func TestVERoundTripAndAccumulate(t *testing.T) { NextFileNum: 44, LastSeqNum: 55, CreatedBackingTables: []*FileBacking{m1.FileBacking}, - NewFiles: []NewFileEntry{ + NewTables: []NewTableEntry{ { Level: 4, Meta: m2, @@ -224,7 +224,7 @@ func TestVersionEditRoundTrip(t *testing.T) { LastSeqNum: 55, RemovedBackingTables: []base.DiskFileNum{10, 11}, CreatedBackingTables: []*FileBacking{m5.FileBacking, m6.FileBacking}, - DeletedFiles: map[DeletedFileEntry]*FileMetadata{ + DeletedTables: map[DeletedTableEntry]*FileMetadata{ { Level: 3, FileNum: 703, @@ -234,7 +234,7 @@ func TestVersionEditRoundTrip(t *testing.T) { FileNum: 704, }: nil, }, - NewFiles: []NewFileEntry{ + NewTables: []NewTableEntry{ { Level: 4, Meta: m1, @@ -413,11 +413,11 @@ func TestVersionEditApply(t *testing.T) { } bve := BulkVersionEdit{} - bve.AddedByFileNum = make(map[base.FileNum]*FileMetadata) + bve.AddedTablesByFileNum = make(map[base.FileNum]*FileMetadata) for _, l := range v.Levels { it := l.Iter() for f := it.First(); f != nil; f = it.Next() { - bve.AddedByFileNum[f.FileNum] = f + bve.AddedTablesByFileNum[f.FileNum] = f } } diff --git a/replay/replay.go b/replay/replay.go index e1f2b8bd4a..49604ed823 100644 --- a/replay/replay.go +++ b/replay/replay.go @@ -713,7 +713,7 @@ func (r *Runner) prepareWorkloadSteps(ctx context.Context) error { var v *manifest.Version var previousVersion *manifest.Version var bve manifest.BulkVersionEdit - bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata) + bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata) applyVE := func(ve *manifest.VersionEdit) error { return bve.Accumulate(ve) } @@ -723,7 +723,7 @@ func (r *Runner) prepareWorkloadSteps(ctx context.Context) error { r.Opts.Comparer, r.Opts.FlushSplitBytes, r.Opts.Experimental.ReadCompactionRate) - bve = manifest.BulkVersionEdit{AddedByFileNum: bve.AddedByFileNum} + bve = manifest.BulkVersionEdit{AddedTablesByFileNum: bve.AddedTablesByFileNum} return v, err } @@ -783,14 +783,14 @@ func (r *Runner) prepareWorkloadSteps(ctx context.Context) error { // r.workload.manifestOff, and we should skip it. continue } - if len(ve.NewFiles) == 0 && len(ve.DeletedFiles) == 0 { + if len(ve.NewTables) == 0 && len(ve.DeletedTables) == 0 { // Skip WAL rotations and other events that don't affect the // files of the LSM. continue } s := workloadStep{ve: ve} - if len(ve.DeletedFiles) > 0 { + if len(ve.DeletedTables) > 0 { // If a version edit deletes files, we assume it's a compaction. 
s.kind = compactionStepKind } else { @@ -800,7 +800,7 @@ func (r *Runner) prepareWorkloadSteps(ctx context.Context) error { s.kind = ingestStepKind } var newFiles []base.DiskFileNum - for _, nf := range ve.NewFiles { + for _, nf := range ve.NewTables { newFiles = append(newFiles, nf.Meta.FileBacking.DiskFileNum) if s.kind == ingestStepKind && (nf.Meta.SmallestSeqNum != nf.Meta.LargestSeqNum || nf.Level != 0) { s.kind = flushStepKind diff --git a/table_stats.go b/table_stats.go index 469d394a3e..4f4b63184f 100644 --- a/table_stats.go +++ b/table_stats.go @@ -55,9 +55,9 @@ func (d *DB) maybeCollectTableStatsLocked() { // updateTableStatsLocked is called when new files are introduced, after the // read state has been updated. It may trigger a new stat collection. // DB.mu must be locked when calling. -func (d *DB) updateTableStatsLocked(newFiles []manifest.NewFileEntry) { +func (d *DB) updateTableStatsLocked(newTables []manifest.NewTableEntry) { var needStats bool - for _, nf := range newFiles { + for _, nf := range newTables { if !nf.Meta.StatsValid() { needStats = true break @@ -67,7 +67,7 @@ func (d *DB) updateTableStatsLocked(newFiles []manifest.NewFileEntry) { return } - d.mu.tableStats.pending = append(d.mu.tableStats.pending, newFiles...) + d.mu.tableStats.pending = append(d.mu.tableStats.pending, newTables...) d.maybeCollectTableStatsLocked() } @@ -169,7 +169,7 @@ type collectedStats struct { } func (d *DB) loadNewFileStats( - rs *readState, pending []manifest.NewFileEntry, + rs *readState, pending []manifest.NewTableEntry, ) ([]collectedStats, []deleteCompactionHint) { var hints []deleteCompactionHint collected := make([]collectedStats, 0, len(pending)) diff --git a/tool/db.go b/tool/db.go index 76f8bf5358..4a20779e98 100644 --- a/tool/db.go +++ b/tool/db.go @@ -690,7 +690,7 @@ func (d *dbT) runProperties(cmd *cobra.Command, args []string) { cmp := base.DefaultComparer var bve manifest.BulkVersionEdit - bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata) + bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata) rr := record.NewReader(f, 0 /* logNum */) for { r, err := rr.Next() diff --git a/tool/find.go b/tool/find.go index 89cf0d86a7..ab9604bcad 100644 --- a/tool/find.go +++ b/tool/find.go @@ -260,11 +260,11 @@ func (f *findT) readManifests(stdout io.Writer) { if num := ve.MinUnflushedLogNum; num != 0 { f.editRefs[num] = append(f.editRefs[num], i) } - for df := range ve.DeletedFiles { + for df := range ve.DeletedTables { diskFileNum := base.PhysicalTableDiskFileNum(df.FileNum) f.editRefs[diskFileNum] = append(f.editRefs[diskFileNum], i) } - for _, nf := range ve.NewFiles { + for _, nf := range ve.NewTables { // The same file can be deleted and added in a single version edit // which indicates a "move" compaction. Only add the edit to the list // once. @@ -575,14 +575,14 @@ func (f *findT) tableProvenance(fileNum base.FileNum) string { for len(editRefs) > 0 { ve := f.edits[editRefs[0]] editRefs = editRefs[1:] - for _, nf := range ve.NewFiles { + for _, nf := range ve.NewTables { if fileNum != nf.Meta.FileNum { continue } var buf bytes.Buffer switch { - case len(ve.DeletedFiles) > 0: + case len(ve.DeletedTables) > 0: // A version edit with deleted files is a compaction. The deleted // files are the inputs to the compaction. We're going to // reconstruct the input files and display those inputs that @@ -591,7 +591,7 @@ func (f *findT) tableProvenance(fileNum base.FileNum) string { // been elided. 
var sourceLevels []int levels := make(map[int][]base.FileNum) - for df := range ve.DeletedFiles { + for df := range ve.DeletedTables { files := levels[df.Level] if len(files) == 0 { sourceLevels = append(sourceLevels, df.Level) @@ -655,9 +655,9 @@ func (f *findT) tableProvenance(fileNum base.FileNum) string { for len(editRefs) > 0 { ve := f.edits[editRefs[0]] editRefs = editRefs[1:] - for _, nf := range ve.NewFiles { + for _, nf := range ve.NewTables { if fileNum == nf.Meta.FileNum { - for df := range ve.DeletedFiles { + for df := range ve.DeletedTables { if fileNum == df.FileNum { fmt.Fprintf(&buf, ", moved to L%d", nf.Level) break diff --git a/tool/lsm.go b/tool/lsm.go index a5dc528afc..3bce20ee46 100644 --- a/tool/lsm.go +++ b/tool/lsm.go @@ -243,8 +243,8 @@ func (l *lsmT) readManifest(path string) []*manifest.VersionEdit { func (l *lsmT) buildKeys(edits []*manifest.VersionEdit) { var keys []base.InternalKey for _, ve := range edits { - for i := range ve.NewFiles { - nf := &ve.NewFiles[i] + for i := range ve.NewTables { + nf := &ve.NewTables[i] keys = append(keys, nf.Meta.Smallest) keys = append(keys, nf.Meta.Largest) } @@ -283,7 +283,7 @@ func (l *lsmT) buildEdits(edits []*manifest.VersionEdit) error { for _, i := range ve.CreatedBackingTables { backings[i.DiskFileNum] = i } - if len(ve.DeletedFiles) == 0 && len(ve.NewFiles) == 0 { + if len(ve.DeletedTables) == 0 && len(ve.NewTables) == 0 { continue } @@ -293,8 +293,8 @@ func (l *lsmT) buildEdits(edits []*manifest.VersionEdit) error { Deleted: make(map[int][]base.FileNum), } - for j := range ve.NewFiles { - nf := &ve.NewFiles[j] + for j := range ve.NewTables { + nf := &ve.NewTables[j] if b, ok := backings[nf.BackingFileNum]; ok && nf.Meta.Virtual { nf.Meta.FileBacking = b } @@ -312,7 +312,7 @@ func (l *lsmT) buildEdits(edits []*manifest.VersionEdit) error { currentFiles[nf.Level] = append(currentFiles[nf.Level], nf.Meta) } - for df := range ve.DeletedFiles { + for df := range ve.DeletedTables { edit.Deleted[df.Level] = append(edit.Deleted[df.Level], df.FileNum) for j, f := range currentFiles[df.Level] { if f.FileNum == df.FileNum { @@ -351,7 +351,7 @@ func (l *lsmT) coalesceEdits(edits []*manifest.VersionEdit) ([]*manifest.Version } be := manifest.BulkVersionEdit{} - be.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata) + be.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata) // Coalesce all edits from [0, l.startEdit) into a BulkVersionEdit. for _, ve := range edits[:l.startEdit] { @@ -362,12 +362,12 @@ func (l *lsmT) coalesceEdits(edits []*manifest.VersionEdit) ([]*manifest.Version } startingEdit := edits[l.startEdit] - var beNewFiles []manifest.NewFileEntry - beDeletedFiles := make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) + var beNewFiles []manifest.NewTableEntry + beDeletedFiles := make(map[manifest.DeletedTableEntry]*manifest.FileMetadata) - for level, deletedFiles := range be.Deleted { + for level, deletedFiles := range be.DeletedTables { for _, file := range deletedFiles { - dfe := manifest.DeletedFileEntry{ + dfe := manifest.DeletedTableEntry{ Level: level, FileNum: file.FileNum, } @@ -376,22 +376,22 @@ func (l *lsmT) coalesceEdits(edits []*manifest.VersionEdit) ([]*manifest.Version } // Filter out added files that were also deleted in the BulkVersionEdit. 
- for level, newFiles := range be.Added { + for level, newFiles := range be.AddedTables { for _, file := range newFiles { - dfe := manifest.DeletedFileEntry{ + dfe := manifest.DeletedTableEntry{ Level: level, FileNum: file.FileNum, } if _, ok := beDeletedFiles[dfe]; !ok { - beNewFiles = append(beNewFiles, manifest.NewFileEntry{ + beNewFiles = append(beNewFiles, manifest.NewTableEntry{ Level: level, Meta: file, }) } } } - startingEdit.NewFiles = append(beNewFiles, startingEdit.NewFiles...) + startingEdit.NewTables = append(beNewFiles, startingEdit.NewTables...) edits = edits[l.startEdit:] return edits, nil @@ -402,14 +402,14 @@ func (l *lsmT) findKey(key base.InternalKey) int { } func (l *lsmT) reason(ve *manifest.VersionEdit) string { - if len(ve.DeletedFiles) > 0 { + if len(ve.DeletedTables) > 0 { return "compacted" } if ve.MinUnflushedLogNum != 0 { return "flushed" } - for i := range ve.NewFiles { - nf := &ve.NewFiles[i] + for i := range ve.NewTables { + nf := &ve.NewTables[i] if nf.Meta.SmallestSeqNum == nf.Meta.LargestSeqNum { return "ingested" } diff --git a/tool/manifest.go b/tool/manifest.go index c9ba098ec6..e0b50e941b 100644 --- a/tool/manifest.go +++ b/tool/manifest.go @@ -151,7 +151,7 @@ func (m *manifestT) runDump(cmd *cobra.Command, args []string) { fmt.Fprintf(stdout, "%s\n", arg) var bve manifest.BulkVersionEdit - bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata) + bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata) var comparer *base.Comparer var editIdx int rr := record.NewReader(f, 0 /* logNum */) @@ -206,12 +206,12 @@ func (m *manifestT) runDump(cmd *cobra.Command, args []string) { empty = false fmt.Fprintf(stdout, " last-seq-num: %d\n", ve.LastSeqNum) } - entries := make([]manifest.DeletedFileEntry, 0, len(ve.DeletedFiles)) - for df := range ve.DeletedFiles { + entries := make([]manifest.DeletedTableEntry, 0, len(ve.DeletedTables)) + for df := range ve.DeletedTables { empty = false entries = append(entries, df) } - slices.SortFunc(entries, func(a, b manifest.DeletedFileEntry) int { + slices.SortFunc(entries, func(a, b manifest.DeletedTableEntry) int { if v := cmp.Compare(a.Level, b.Level); v != 0 { return v } @@ -220,7 +220,7 @@ func (m *manifestT) runDump(cmd *cobra.Command, args []string) { for _, df := range entries { fmt.Fprintf(stdout, " deleted: L%d %s\n", df.Level, df.FileNum) } - for _, nf := range ve.NewFiles { + for _, nf := range ve.NewTables { empty = false fmt.Fprintf(stdout, " added: L%d %s:%d", nf.Level, nf.Meta.FileNum, nf.Meta.Size) @@ -260,12 +260,12 @@ func anyOverlap(cmp base.Compare, ve *manifest.VersionEdit, start, end key) bool if start == nil && end == nil { return true } - for _, df := range ve.DeletedFiles { + for _, df := range ve.DeletedTables { if anyOverlapFile(cmp, df, start, end) { return true } } - for _, nf := range ve.NewFiles { + for _, nf := range ve.NewTables { if anyOverlapFile(cmp, nf.Meta, start, end) { return true } @@ -323,7 +323,7 @@ func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error { buckets = map[time.Time]*summaryBucket{} metadatas = map[base.FileNum]*manifest.FileMetadata{} ) - bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata) + bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata) rr := record.NewReader(f, 0 /* logNum */) numHistErrors := 0 for i := 0; ; i++ { @@ -354,10 +354,10 @@ func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error { // // TODO(sumeer): this summarization needs a rewrite. 
diff --git a/tool/manifest.go b/tool/manifest.go
index c9ba098ec6..e0b50e941b 100644
--- a/tool/manifest.go
+++ b/tool/manifest.go
@@ -151,7 +151,7 @@ func (m *manifestT) runDump(cmd *cobra.Command, args []string) {
 		fmt.Fprintf(stdout, "%s\n", arg)
 
 		var bve manifest.BulkVersionEdit
-		bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
+		bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
 		var comparer *base.Comparer
 		var editIdx int
 		rr := record.NewReader(f, 0 /* logNum */)
@@ -206,12 +206,12 @@ func (m *manifestT) runDump(cmd *cobra.Command, args []string) {
 				empty = false
 				fmt.Fprintf(stdout, " last-seq-num: %d\n", ve.LastSeqNum)
 			}
-			entries := make([]manifest.DeletedFileEntry, 0, len(ve.DeletedFiles))
-			for df := range ve.DeletedFiles {
+			entries := make([]manifest.DeletedTableEntry, 0, len(ve.DeletedTables))
+			for df := range ve.DeletedTables {
 				empty = false
 				entries = append(entries, df)
 			}
-			slices.SortFunc(entries, func(a, b manifest.DeletedFileEntry) int {
+			slices.SortFunc(entries, func(a, b manifest.DeletedTableEntry) int {
 				if v := cmp.Compare(a.Level, b.Level); v != 0 {
 					return v
 				}
@@ -220,7 +220,7 @@ func (m *manifestT) runDump(cmd *cobra.Command, args []string) {
 			for _, df := range entries {
 				fmt.Fprintf(stdout, " deleted: L%d %s\n", df.Level, df.FileNum)
 			}
-			for _, nf := range ve.NewFiles {
+			for _, nf := range ve.NewTables {
 				empty = false
 				fmt.Fprintf(stdout, " added: L%d %s:%d",
 					nf.Level, nf.Meta.FileNum, nf.Meta.Size)
@@ -260,12 +260,12 @@ func anyOverlap(cmp base.Compare, ve *manifest.VersionEdit, start, end key) bool
 	if start == nil && end == nil {
 		return true
 	}
-	for _, df := range ve.DeletedFiles {
+	for _, df := range ve.DeletedTables {
 		if anyOverlapFile(cmp, df, start, end) {
 			return true
 		}
 	}
-	for _, nf := range ve.NewFiles {
+	for _, nf := range ve.NewTables {
 		if anyOverlapFile(cmp, nf.Meta, start, end) {
 			return true
 		}
@@ -323,7 +323,7 @@ func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error {
 		buckets = map[time.Time]*summaryBucket{}
 		metadatas = map[base.FileNum]*manifest.FileMetadata{}
 	)
-	bve.AddedByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
+	bve.AddedTablesByFileNum = make(map[base.FileNum]*manifest.FileMetadata)
 	rr := record.NewReader(f, 0 /* logNum */)
 	numHistErrors := 0
 	for i := 0; ; i++ {
@@ -354,10 +354,10 @@ func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error {
 		//
 		// TODO(sumeer): this summarization needs a rewrite. We could do that
 		// after adding an enum to the VersionEdit to aid the summarization.
-		isLikelyCompaction := len(ve.NewFiles) > 0 && len(ve.DeletedFiles) > 0 && len(ve.CreatedBackingTables) == 0
-		isIntraL0Compaction := isLikelyCompaction && ve.NewFiles[0].Level == 0
+		isLikelyCompaction := len(ve.NewTables) > 0 && len(ve.DeletedTables) > 0 && len(ve.CreatedBackingTables) == 0
+		isIntraL0Compaction := isLikelyCompaction && ve.NewTables[0].Level == 0
 		veNewest := newestOverall
-		for _, nf := range ve.NewFiles {
+		for _, nf := range ve.NewTables {
 			_, seen := metadatas[nf.Meta.FileNum]
 			if seen && !isLikelyCompaction {
 				// Output error and continue processing as usual.
@@ -396,7 +396,7 @@ func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error {
 			buckets[bucketKey] = b
 		}
 
-		for _, nf := range ve.NewFiles {
+		for _, nf := range ve.NewTables {
 			if !isLikelyCompaction {
 				b.bytesAdded[nf.Level] += nf.Meta.Size
 			} else if !isIntraL0Compaction {
@@ -405,7 +405,7 @@ func (m *manifestT) runSummarizeOne(stdout io.Writer, arg string) error {
 			}
 		}
 
-		for dfe := range ve.DeletedFiles {
+		for dfe := range ve.DeletedTables {
 			// Increase `bytesCompactOut` for the input level of any compactions
 			// that remove bytes from a level (excluding intra-L0 compactions).
 			if isLikelyCompaction && !isIntraL0Compaction && dfe.Level != manifest.NumLevels-1 {
@@ -598,7 +598,7 @@ func (m *manifestT) runCheck(cmd *cobra.Command, args []string) {
 				break
 			}
 			var bve manifest.BulkVersionEdit
-			bve.AddedByFileNum = addedByFileNum
+			bve.AddedTablesByFileNum = addedByFileNum
 			if err := bve.Accumulate(&ve); err != nil {
 				fmt.Fprintf(stderr, "%s\n", err)
 				ok = false
@@ -618,8 +618,8 @@ func (m *manifestT) runCheck(cmd *cobra.Command, args []string) {
 				m.fmtKey.setForComparer(ve.ComparerName, m.comparers)
 			}
 			empty = empty && ve.MinUnflushedLogNum == 0 && ve.ObsoletePrevLogNum == 0 &&
-				ve.LastSeqNum == 0 && len(ve.DeletedFiles) == 0 &&
-				len(ve.NewFiles) == 0
+				ve.LastSeqNum == 0 && len(ve.DeletedTables) == 0 &&
+				len(ve.NewTables) == 0
 			if empty {
 				continue
 			}
@@ -632,10 +632,10 @@ func (m *manifestT) runCheck(cmd *cobra.Command, args []string) {
 				fmt.Fprintf(stdout, "Version state before failed Apply\n")
 				m.printLevels(cmp.Compare, stdout, v)
 				fmt.Fprintf(stdout, "Version edit that failed\n")
-				for df := range ve.DeletedFiles {
+				for df := range ve.DeletedTables {
 					fmt.Fprintf(stdout, " deleted: L%d %s\n", df.Level, df.FileNum)
 				}
-				for _, nf := range ve.NewFiles {
+				for _, nf := range ve.NewTables {
 					fmt.Fprintf(stdout, " added: L%d %s:%d",
 						nf.Level, nf.Meta.FileNum, nf.Meta.Size)
					formatSeqNumRange(stdout, nf.Meta.SmallestSeqNum, nf.Meta.LargestSeqNum)
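As the TODO above notes, summarize has no explicit edit type to rely on, so it guesses: an edit that both adds and deletes tables without creating backing tables is likely a compaction, and an intra-L0 compaction if its first new table landed in L0. A small sketch of that classification under the same assumptions; the edit struct and classify function are invented stand-ins, not manifest.VersionEdit.

package main

import "fmt"

// edit holds only the fields the summarize heuristic above inspects.
type edit struct {
	newTableLevels  []int // levels of the edit's new tables, in order
	deletedTables   int   // len(ve.DeletedTables)
	createdBackings int   // len(ve.CreatedBackingTables)
}

// classify mirrors the heuristic: adds plus deletes with no created backings
// looks like a compaction; if the first output is in L0, it is intra-L0.
func classify(ve edit) (isLikelyCompaction, isIntraL0 bool) {
	isLikelyCompaction = len(ve.newTableLevels) > 0 && ve.deletedTables > 0 && ve.createdBackings == 0
	isIntraL0 = isLikelyCompaction && ve.newTableLevels[0] == 0
	return isLikelyCompaction, isIntraL0
}

func main() {
	fmt.Println(classify(edit{newTableLevels: []int{0}, deletedTables: 4})) // true true
	fmt.Println(classify(edit{newTableLevels: []int{5}, deletedTables: 2})) // true false
	fmt.Println(classify(edit{newTableLevels: []int{0}}))                   // false false (a flush)
}

The created-backings check is what keeps ingest-time excises, which also add and delete tables, from being miscounted as compactions.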
diff --git a/version_set.go b/version_set.go
index b8dc4f2a27..d2e7230fad 100644
--- a/version_set.go
+++ b/version_set.go
@@ -26,12 +26,12 @@ const manifestMarkerName = `manifest`
 
 // Provide type aliases for the various manifest structs.
 type bulkVersionEdit = manifest.BulkVersionEdit
-type deletedFileEntry = manifest.DeletedFileEntry
+type deletedFileEntry = manifest.DeletedTableEntry
 type fileMetadata = manifest.FileMetadata
 type physicalMeta = manifest.PhysicalFileMeta
 type virtualMeta = manifest.VirtualFileMeta
 type fileBacking = manifest.FileBacking
-type newFileEntry = manifest.NewFileEntry
+type newTableEntry = manifest.NewTableEntry
 type version = manifest.Version
 type versionEdit = manifest.VersionEdit
 type versionList = manifest.VersionList
@@ -232,7 +232,7 @@ func (vs *versionSet) load(
 	// Read the versionEdits in the manifest file.
 	var bve bulkVersionEdit
-	bve.AddedByFileNum = make(map[base.FileNum]*fileMetadata)
+	bve.AddedTablesByFileNum = make(map[base.FileNum]*fileMetadata)
 	manifest, err := vs.fs.Open(manifestPath)
 	if err != nil {
 		return errors.Wrapf(err, "pebble: could not open manifest file %q for DB %q",
@@ -316,7 +316,7 @@ func (vs *versionSet) load(
 		vs.virtualBackings.AddAndRef(b)
 	}
 
-	for _, addedLevel := range bve.Added {
+	for _, addedLevel := range bve.AddedTables {
 		for _, m := range addedLevel {
 			if m.Virtual {
 				vs.virtualBackings.AddTable(m)
@@ -327,7 +327,7 @@ func (vs *versionSet) load(
 	if invariants.Enabled {
 		// There should be no deleted tables or backings, since we're starting from
 		// an empty state.
-		for _, deletedLevel := range bve.Deleted {
+		for _, deletedLevel := range bve.DeletedTables {
 			if len(deletedLevel) != 0 {
 				panic("deleted files after manifest replay")
 			}
@@ -518,7 +518,7 @@ func (vs *versionSet) logAndApply(
 	//
 	// The logic below uses the min of the last snapshot file count and the file
 	// count in the current version.
-	vs.rotationHelper.AddRecord(int64(len(ve.DeletedFiles) + len(ve.NewFiles)))
+	vs.rotationHelper.AddRecord(int64(len(ve.DeletedTables) + len(ve.NewTables)))
 	sizeExceeded := vs.manifest.Size() >= vs.opts.MaxManifestFileSize
 	requireRotation := forceRotation || vs.manifest == nil
 
@@ -743,7 +743,7 @@ func getZombiesAndUpdateVirtualBackings(
 	// Note that for the common case where there are very few elements, the map
 	// will stay on the stack.
 	stillUsed := make(map[base.DiskFileNum]struct{})
-	for _, nf := range ve.NewFiles {
+	for _, nf := range ve.NewTables {
 		if !nf.Meta.Virtual {
 			stillUsed[nf.Meta.FileBacking.DiskFileNum] = struct{}{}
 			_, localFileDelta := sizeIfLocal(nf.Meta.FileBacking, provider)
@@ -753,7 +753,7 @@ func getZombiesAndUpdateVirtualBackings(
 	for _, b := range ve.CreatedBackingTables {
 		stillUsed[b.DiskFileNum] = struct{}{}
 	}
-	for _, m := range ve.DeletedFiles {
+	for _, m := range ve.DeletedTables {
 		if !m.Virtual {
-			// NB: this deleted file may also be in NewFiles or
+			// NB: this deleted table may also be in NewTables or
 			// CreatedBackingTables, due to a file moving between levels, or
@@ -780,12 +780,12 @@ func getZombiesAndUpdateVirtualBackings(
 		_, localFileDelta := sizeIfLocal(b, provider)
 		localLiveSizeDelta += localFileDelta
 	}
-	for _, nf := range ve.NewFiles {
+	for _, nf := range ve.NewTables {
 		if nf.Meta.Virtual {
 			virtualBackings.AddTable(nf.Meta)
 		}
 	}
-	for _, m := range ve.DeletedFiles {
+	for _, m := range ve.DeletedTables {
 		if m.Virtual {
 			virtualBackings.RemoveTable(m)
 		}
@@ -910,7 +910,7 @@ func (vs *versionSet) createManifest(
 	for level, levelMetadata := range vs.currentVersion().Levels {
 		iter := levelMetadata.Iter()
 		for meta := iter.First(); meta != nil; meta = iter.Next() {
-			snapshot.NewFiles = append(snapshot.NewFiles, newFileEntry{
+			snapshot.NewTables = append(snapshot.NewTables, newTableEntry{
 				Level: level,
 				Meta:  meta,
 			})
@@ -1116,7 +1116,7 @@ func findCurrentManifest(
 	return marker, manifestNum, true, nil
 }
 
-func newFileMetrics(newFiles []manifest.NewFileEntry) map[int]*LevelMetrics {
+func newFileMetrics(newFiles []manifest.NewTableEntry) map[int]*LevelMetrics {
 	m := map[int]*LevelMetrics{}
 	for _, nf := range newFiles {
 		lm := m[nf.Level]
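The getZombiesAndUpdateVirtualBackings hunks above compute which file backings remain referenced after an edit: backings of non-virtual new tables and backings explicitly created by the edit stay live, and a deleted non-virtual table whose backing is in neither set becomes a zombie. A minimal sketch of that set computation; table and zombieBackings are plain stand-ins for fileMetadata and the real function, with uint64 in place of base.DiskFileNum.

package main

import "fmt"

// table is a simplified stand-in for *fileMetadata: whether the table is
// virtual, and the DiskFileNum of its file backing.
type table struct {
	virtual bool
	backing uint64
}

// zombieBackings mirrors the stillUsed computation above: backings referenced
// by non-virtual new tables or created by the edit stay live; deleted
// non-virtual tables whose backings are no longer referenced become zombies.
func zombieBackings(newTables, deletedTables []table, createdBackings []uint64) []uint64 {
	stillUsed := make(map[uint64]struct{})
	for _, nf := range newTables {
		if !nf.virtual {
			stillUsed[nf.backing] = struct{}{}
		}
	}
	for _, b := range createdBackings {
		stillUsed[b] = struct{}{}
	}
	var zombies []uint64
	for _, m := range deletedTables {
		if m.virtual {
			continue // virtual tables are tracked through the virtual backings set
		}
		if _, ok := stillUsed[m.backing]; !ok {
			zombies = append(zombies, m.backing)
		}
	}
	return zombies
}

func main() {
	// Backing 3 is deleted and re-added (a move between levels), so it is not
	// a zombie; backing 4 is deleted outright and becomes one.
	fmt.Println(zombieBackings(
		[]table{{backing: 3}},
		[]table{{backing: 3}, {backing: 4}},
		nil,
	)) // [4]
}

This is why, as the NB comment above says, a deleted table appearing again in NewTables must not be treated as a zombie: a level move deletes and re-adds the same backing in one edit.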
diff --git a/version_set_test.go b/version_set_test.go
index fb37bc21e5..577cf74f74 100644
--- a/version_set_test.go
+++ b/version_set_test.go
@@ -88,7 +88,7 @@ func TestVersionSet(t *testing.T) {
 			if err != nil {
 				td.Fatalf(t, "%v", err)
 			}
-			for _, nf := range ve.NewFiles {
+			for _, nf := range ve.NewTables {
 				// Set a size that depends on FileNum.
 				nf.Meta.Size = uint64(nf.Meta.FileNum) * 100
 				nf.Meta.FileBacking = dedupBacking(nf.Meta.FileBacking)
@@ -97,20 +97,20 @@ func TestVersionSet(t *testing.T) {
 					createFile(nf.Meta.FileBacking.DiskFileNum)
 				}
 			}
-			for de := range ve.DeletedFiles {
+			for de := range ve.DeletedTables {
 				m := metas[de.FileNum]
 				if m == nil {
 					td.Fatalf(t, "unknown FileNum %s", de.FileNum)
 				}
-				ve.DeletedFiles[de] = m
+				ve.DeletedTables[de] = m
 			}
 			for i := range ve.CreatedBackingTables {
 				ve.CreatedBackingTables[i] = dedupBacking(ve.CreatedBackingTables[i])
 				createFile(ve.CreatedBackingTables[i].DiskFileNum)
 			}
-			fileMetrics := newFileMetrics(ve.NewFiles)
-			for de, f := range ve.DeletedFiles {
+			fileMetrics := newFileMetrics(ve.NewTables)
+			for de, f := range ve.DeletedTables {
 				lm := fileMetrics[de.Level]
 				if lm == nil {
 					lm = &LevelMetrics{}
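The test fixture above patches up hand-written version edits: DeletedTables entries arrive keyed by (level, file number) with nil metadata and must be resolved against the metadata recorded when the table was first added. A self-contained sketch of that resolution step; resolveDeleted is a hypothetical helper (the real test does this inline), and the types are stand-ins.

package main

import "fmt"

// deletedTableEntry is a simplified stand-in for manifest.DeletedTableEntry.
type deletedTableEntry struct {
	level   int
	fileNum uint64
}

// fileMetadata is a simplified stand-in for manifest.FileMetadata.
type fileMetadata struct {
	fileNum uint64
	size    uint64
}

// resolveDeleted fills in the nil metadata values of a DeletedTables-style
// map from the metadata recorded when each table was added, failing on any
// file number that was never added.
func resolveDeleted(deleted map[deletedTableEntry]*fileMetadata, metas map[uint64]*fileMetadata) error {
	for de := range deleted {
		m := metas[de.fileNum]
		if m == nil {
			return fmt.Errorf("unknown FileNum %d", de.fileNum)
		}
		deleted[de] = m
	}
	return nil
}

func main() {
	metas := map[uint64]*fileMetadata{7: {fileNum: 7, size: 700}}
	deleted := map[deletedTableEntry]*fileMetadata{{level: 3, fileNum: 7}: nil}
	if err := resolveDeleted(deleted, metas); err != nil {
		panic(err)
	}
	fmt.Println(deleted[deletedTableEntry{level: 3, fileNum: 7}].size) // 700
}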