Skip to content

Commit 88e7636

Browse files
committed
db: rename FileNum to TableNum in top-level package
1 parent 6eb3465 commit 88e7636

30 files changed

+265
-272
lines changed

batch.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1179,9 +1179,9 @@ func (b *Batch) LogData(data []byte, _ *WriteOptions) error {
11791179
return nil
11801180
}
11811181

1182-
// IngestSST adds the FileNum for an sstable to the batch. The data will only be
1182+
// IngestSST adds the TableNum for an sstable to the batch. The data will only be
11831183
// written to the WAL (not added to memtables or sstables).
1184-
func (b *Batch) ingestSST(fileNum base.FileNum) {
1184+
func (b *Batch) ingestSST(tableNum base.TableNum) {
11851185
if b.Empty() {
11861186
b.ingestedSSTBatch = true
11871187
} else if !b.ingestedSSTBatch {
@@ -1191,7 +1191,7 @@ func (b *Batch) ingestSST(fileNum base.FileNum) {
11911191

11921192
origMemTableSize := b.memTableSize
11931193
var buf [binary.MaxVarintLen64]byte
1194-
length := binary.PutUvarint(buf[:], uint64(fileNum))
1194+
length := binary.PutUvarint(buf[:], uint64(tableNum))
11951195
b.prepareDeferredKeyRecord(length, InternalKeyKindIngestSST)
11961196
copy(b.deferredOp.Key, buf[:length])
11971197
// Since IngestSST writes only to the WAL and does not affect the memtable,

batch_test.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -68,22 +68,22 @@ func testBatch(t *testing.T, size int) {
6868
}
6969
}
7070

71-
encodeFileNum := func(n base.FileNum) string {
71+
encodeTableNum := func(n base.TableNum) string {
7272
return string(binary.AppendUvarint(nil, uint64(n)))
7373
}
74-
decodeFileNum := func(d []byte) base.FileNum {
74+
decodeTableNum := func(d []byte) base.TableNum {
7575
val, n := binary.Uvarint(d)
7676
if n <= 0 {
7777
t.Fatalf("invalid filenum encoding")
7878
}
79-
return base.FileNum(val)
79+
return base.TableNum(val)
8080
}
8181

8282
// RangeKeySet and RangeKeyUnset are untested here because they don't expose
8383
// deferred variants. This is a consequence of these keys' more complex
8484
// value encodings.
8585
testCases := []testCase{
86-
{InternalKeyKindIngestSST, encodeFileNum(1), "", 0},
86+
{InternalKeyKindIngestSST, encodeTableNum(1), "", 0},
8787
{InternalKeyKindSet, "roses", "red", 0},
8888
{InternalKeyKindSet, "violets", "blue", 0},
8989
{InternalKeyKindDelete, "roses", "", 0},
@@ -131,7 +131,7 @@ func testBatch(t *testing.T, size int) {
131131
case InternalKeyKindRangeKeyDelete:
132132
_ = b.RangeKeyDelete([]byte(tc.key), []byte(tc.value), nil)
133133
case InternalKeyKindIngestSST:
134-
b.ingestSST(decodeFileNum([]byte(tc.key)))
134+
b.ingestSST(decodeTableNum([]byte(tc.key)))
135135
}
136136
}
137137
verifyTestCases(b, testCases, false /* indexedKindsOnly */)
@@ -175,7 +175,7 @@ func testBatch(t *testing.T, size int) {
175175
case InternalKeyKindLogData:
176176
_ = b.LogData([]byte(tc.key), nil)
177177
case InternalKeyKindIngestSST:
178-
b.ingestSST(decodeFileNum([]byte(tc.key)))
178+
b.ingestSST(decodeTableNum([]byte(tc.key)))
179179
case InternalKeyKindRangeKeyDelete:
180180
d := b.RangeKeyDeleteDeferred(len(key), len(value))
181181
copy(d.Key, key)

compaction.go

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ const (
137137
// retained and linked in a new level without being obsoleted.
138138
compactionKindMove
139139
// compactionKindCopy denotes a copy compaction where the input file is
140-
// copied byte-by-byte into a new file with a new FileNum in the output level.
140+
// copied byte-by-byte into a new file with a new TableNum in the output level.
141141
compactionKindCopy
142142
// compactionKindDeleteOnly denotes a compaction that only deletes input
143143
// files. It can occur when wide range tombstones completely contain sstables.
@@ -1060,7 +1060,7 @@ type readCompaction struct {
10601060
// The file associated with the compaction.
10611061
// If the file no longer belongs in the same
10621062
// level, then we skip the compaction.
1063-
fileNum base.FileNum
1063+
tableNum base.TableNum
10641064
}
10651065

10661066
func (d *DB) addInProgressCompaction(c *compaction) {
@@ -1345,7 +1345,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
13451345
},
13461346
v: c.version,
13471347
}
1348-
replacedFiles := make(map[base.FileNum][]newTableEntry)
1348+
replacedTables := make(map[base.TableNum][]newTableEntry)
13491349
for _, file := range ingestFlushable.files {
13501350
var fileToSplit *tableMetadata
13511351
var level int
@@ -1396,14 +1396,14 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
13961396
return nil, err
13971397
}
13981398
newFiles := applyExciseToVersionEdit(ve, m, leftTable, rightTable, layer.Level())
1399-
replacedFiles[m.TableNum] = newFiles
1399+
replacedTables[m.TableNum] = newFiles
14001400
updateLevelMetricsOnExcise(m, layer.Level(), newFiles)
14011401
}
14021402
}
14031403
}
14041404

14051405
if len(ingestSplitFiles) > 0 {
1406-
if err := d.ingestSplit(context.TODO(), ve, updateLevelMetricsOnExcise, ingestSplitFiles, replacedFiles); err != nil {
1406+
if err := d.ingestSplit(context.TODO(), ve, updateLevelMetricsOnExcise, ingestSplitFiles, replacedTables); err != nil {
14071407
return nil, err
14081408
}
14091409
}
@@ -2451,9 +2451,9 @@ func (d *DB) cleanupVersionEdit(ve *versionEdit) {
24512451
TableBackings: make([]*manifest.TableBacking, 0, len(ve.NewTables)),
24522452
BlobFiles: make([]*manifest.BlobFileMetadata, 0, len(ve.NewBlobFiles)),
24532453
}
2454-
deletedFiles := make(map[base.FileNum]struct{})
2454+
deletedTables := make(map[base.TableNum]struct{})
24552455
for key := range ve.DeletedTables {
2456-
deletedFiles[key.FileNum] = struct{}{}
2456+
deletedTables[key.FileNum] = struct{}{}
24572457
}
24582458
for i := range ve.NewBlobFiles {
24592459
obsoleteFiles.AddBlob(ve.NewBlobFiles[i])
@@ -2470,7 +2470,7 @@ func (d *DB) cleanupVersionEdit(ve *versionEdit) {
24702470
// We handle backing files separately.
24712471
continue
24722472
}
2473-
if _, ok := deletedFiles[ve.NewTables[i].Meta.TableNum]; ok {
2473+
if _, ok := deletedTables[ve.NewTables[i].Meta.TableNum]; ok {
24742474
// This file is being moved in this ve to a different level.
24752475
// Don't mark it as obsolete.
24762476
continue
@@ -2578,7 +2578,7 @@ func (d *DB) compact1(c *compaction, errChannel chan error) (err error) {
25782578
return err
25792579
}
25802580

2581-
// runCopyCompaction runs a copy compaction where a new FileNum is created that
2581+
// runCopyCompaction runs a copy compaction where a new TableNum is created that
25822582
// is a byte-for-byte copy of the input file or span thereof in some cases. This
25832583
// is used in lieu of a move compaction when a file is being moved across the
25842584
// local/remote storage boundary. It could also be used in lieu of a rewrite
@@ -2647,7 +2647,7 @@ func (d *DB) runCopyCompaction(
26472647
if inputMeta.HasRangeKeys {
26482648
newMeta.ExtendRangeKeyBounds(c.cmp, inputMeta.RangeKeyBounds.Smallest(), inputMeta.RangeKeyBounds.Largest())
26492649
}
2650-
newMeta.TableNum = d.mu.versions.getNextFileNum()
2650+
newMeta.TableNum = d.mu.versions.getNextTableNum()
26512651
if objMeta.IsExternal() {
26522652
// external -> local/shared copy. File must be virtual.
26532653
// We will update this size later after we produce the new backing file.

compaction_picker.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2003,7 +2003,7 @@ func pickReadTriggeredCompactionHelper(
20032003
overlapSlice := p.vers.Overlaps(rc.level, base.UserKeyBoundsInclusive(rc.start, rc.end))
20042004
var fileMatches bool
20052005
for f := range overlapSlice.All() {
2006-
if f.TableNum == rc.fileNum {
2006+
if f.TableNum == rc.tableNum {
20072007
fileMatches = true
20082008
break
20092009
}

0 commit comments

Comments (0)